//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
         !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool X86FrameLowering::canSimplifyCallFramePseudos(
    const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool X86FrameLowering::needsFrameIndexResolution(
    const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
          MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
          MFI.hasStackMap() || MFI.hasPatchPoint() ||
          MFI.hasCopyImplyingStackAdjustment());
}

static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(bool IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
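
// For illustration (values here are arbitrary examples, not taken from any
// particular caller): an immediate that fits in a signed 8-bit field selects
// the short encoding, so
//   getSUBriOpcode(/*IsLP64=*/true, 16)   -> X86::SUB64ri8
//   getSUBriOpcode(/*IsLP64=*/true, 4096) -> X86::SUB64ri32
//   getANDriOpcode(/*IsLP64=*/false, -16) -> X86::AND32ri8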
static bool isEAXLiveIn(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
    unsigned Reg = RegMask.PhysReg;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This would be the case if EFLAGS is live-in of the region composed
/// by the terminators, or live-out of that region without being defined
/// by a terminator.
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an eflags that is not defined
      // by another previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check this specific terminator does not
      // read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out; that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}
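
// For illustration (a sketch in MIR-like syntax, not from a specific test):
// this predicate returns true for a block whose conditional branch reads an
// EFLAGS value defined before the terminator sequence, e.g.
//   CMP64rr $rax, $rbx, implicit-def $eflags
//   <terminators begin>
//   JCC_1 %bb.1, 4, implicit $eflags    ; EFLAGS is live-in here
// so a stack-pointer adjustment inserted before the terminators must not
// clobber the flags with SUB/ADD.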
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL, int64_t NumBytes,
                                    bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split into smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {

    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add.
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);

    unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                           : TRI->findDeadCallerSavedReg(MBB, MBBI);
      if (Reg) {
        unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                             : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}
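
// For illustration (assuming x86-64 with RAX free; a sketch, not an
// exhaustive list), typical expansions chosen above are:
//   emitSPUpdate(..., -8)    -> pushq %rax              (slot-sized)
//   emitSPUpdate(..., -128)  -> subq  $128, %rsp
//   emitSPUpdate(..., -4GiB) -> movabsq $4294967296, %rax
//                               subq  %rax, %rsp        (offset > one chunk)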
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning
    // of MBB would require to use LEA operations.
    // We need to use LEA operations if EFLAGS is live in, because
    // it means an instruction will read it before it gets defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                               : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}
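
// For illustration: when EFLAGS must stay live across the insertion point,
// the adjustment above is emitted flag-free as
//   leaq -16(%rsp), %rsp
// while the default form is the shorter, flag-clobbering
//   subq $16, %rsp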
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI =
      doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that the ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions between
  // the ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || Opc == X86::ADD32ri ||
       Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    auto CIs = MBB.getParent()->getFrameInstructions();
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
        CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
      PI = MBB.erase(PI);
  }
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}

void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// Emits DWARF info specifying offsets of callee-saved registers and the
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  if (!hasFP(MF)) {
    emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
    return;
  }
  const MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
  // Offset = space for return address + size of the frame pointer itself.
  unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
  BuildCFI(MBB, MBBI, DebugLoc{},
           MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
  emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
}
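
// For illustration, on x86-64 with a frame pointer the directive built above
// corresponds to
//   .cfi_offset %rbp, -16
// (Offset = 8 bytes of return address + 8 bytes of saved RBP).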
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool IsPrologue) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (const CalleeSavedInfo &I : CSI) {
    int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
    unsigned Reg = I.getReg();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

    if (IsPrologue) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    } else {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createRestore(nullptr, DwarfReg));
    }
  }
}

void X86FrameLowering::emitStackProbe(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
          .addImm(0 /* no explicit stack size */);
    } else {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
  }
}

void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  });
  if (Where != PrologMBB.end()) {
    DebugLoc DL = PrologMBB.findDebugLoc(Where);
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();
  }
}

void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
                                            MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            const DebugLoc &DL,
                                            bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
  else
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
}

void X86FrameLowering::emitStackProbeInlineGeneric(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  MachineInstr &AllocWithProbe = *MBBI;
  uint64_t Offset = AllocWithProbe.getOperand(0).getImm();

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
         "different expansion expected for CoreCLR 64 bit");

  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;

  uint64_t MaxAlign =
      TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;

  // Synthesize a loop or unroll it, depending on the number of iterations.
  // BuildStackAlignAND ensures that only MaxAlign % StackProbeSize bytes are
  // left between the unaligned rsp and the aligned rsp.
  if (Offset > ProbeChunk) {
    emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
                                    MaxAlign % StackProbeSize);
  } else {
    emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
                                     MaxAlign % StackProbeSize);
  }
}
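
// For illustration, with the common 4 KiB probe size ProbeChunk is 32 KiB:
// a 16 KiB allocation gets the unrolled block expansion below, while a
// 64 KiB allocation gets the loop expansion.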
void X86FrameLowering::emitStackProbeInlineGenericBlock(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {

  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  uint64_t CurrentOffset = 0;

  assert(AlignOffset < StackProbeSize);

  // If the offset is so small it fits within a page, there's nothing to do.
  if (StackProbeSize < Offset + AlignOffset) {

    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize - AlignOffset)
                           .setMIFlag(MachineInstr::FrameSetup);
    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createAdjustCfaOffset(
                   nullptr, StackProbeSize - AlignOffset));
    }
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset = StackProbeSize - AlignOffset;
  }

  // For the next N - 1 pages, just probe. I tried to take advantage of
  // natural probes, but it implies much more logic and there were very few
  // interesting natural probes to interleave.
  while (CurrentOffset + StackProbeSize < Offset) {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize)
                           .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize));
    }
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
  }

  // No need to probe the tail; it is smaller than a page.
  uint64_t ChunkSize = Offset - CurrentOffset;
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                         .addReg(StackPtr)
                         .addImm(ChunkSize)
                         .setMIFlag(MachineInstr::FrameSetup);
  // No need to adjust the Dwarf CFA offset here; the last position of the
  // stack has already been defined.
  MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}
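
// For illustration, the unrolled expansion above for Offset = 0x2800,
// AlignOffset = 0, and a 4 KiB probe size looks like (sketch):
//   subq $0x1000, %rsp
//   movq $0, (%rsp)
//   subq $0x1000, %rsp
//   movq $0, (%rsp)
//   subq $0x800, %rsp        ; tail is smaller than a page, not probed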
void X86FrameLowering::emitStackProbeInlineGenericLoop(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {
  assert(Offset && "null offset");

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  if (AlignOffset) {
    if (AlignOffset < StackProbeSize) {
      // Perform a first smaller allocation followed by a probe.
      const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
                             .addReg(StackPtr)
                             .addImm(AlignOffset)
                             .setMIFlag(MachineInstr::FrameSetup);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                       .setMIFlag(MachineInstr::FrameSetup),
                   StackPtr, false, 0)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
      NumFrameExtraProbe++;
      Offset -= AlignOffset;
    }
  }

  // Synthesize a loop
  NumFrameLoopProbe++;
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB.getIterator();
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);

  Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D;
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

  // save loop bound
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
    BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
        .addReg(FinalStackProbed)
        .addImm(Offset / StackProbeSize * StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // allocate a page
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
    BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr)
        .addReg(StackPtr)
        .addImm(StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // touch the page
  addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
                   .setMIFlag(MachineInstr::FrameSetup),
               StackPtr, false, 0)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  // cmp with stack pointer bound
  BuildMI(testMBB, DL,
          TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(StackPtr)
      .addReg(FinalStackProbed)
      .setMIFlag(MachineInstr::FrameSetup);

  // jump
  BuildMI(testMBB, DL, TII.get(X86::JCC_1))
      .addMBB(testMBB)
      .addImm(X86::COND_NE)
      .setMIFlag(MachineInstr::FrameSetup);
  testMBB->addSuccessor(testMBB);
  testMBB->addSuccessor(tailMBB);

  // BB management
  tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
  tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(testMBB);

  // handle tail
  unsigned TailOffset = Offset % StackProbeSize;
  if (TailOffset) {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
    BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(TailOffset)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Update Live In information
  recomputeLiveIns(*testMBB);
  recomputeLiveIns(*tailMBB);
}
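
// For illustration, the loop expansion above is roughly (64-bit, 4 KiB probe
// size; a sketch only):
//   movq %rsp, %r11
//   subq $(Offset & ~0xFFF), %r11    ; loop bound in R11
// .LtestMBB:
//   subq $0x1000, %rsp               ; allocate a page
//   movq $0, (%rsp)                  ; touch it
//   cmpq %r11, %rsp
//   jne  .LtestMBB
//   subq $(Offset & 0xFFF), %rsp     ; unprobed tail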
void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const Register SizeReg = InProlog ? X86::RAX
                                    : MRI.createVirtualRegister(RegClass),
                 ZeroReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 CopyReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 TestReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 FinalReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
                 RoundedReg = InProlog ? X86::RDX
                                       : MRI.createVirtualRegister(RegClass),
                 LimitReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
                 JoinReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 ProbeReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);

    // Check if we need to spill RCX and/or RDX.
    // Here we assume that no earlier prologue instruction changes RCX and/or
    // RDX, so checking the block live-ins is enough.
    const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
    const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    // Assign the initial slot to both registers, then change RDX's slot if
    // both need to be spilled.
    if (IsRCXLiveIn)
      RCXShadowSlot = InitSlot;
    if (IsRDXLiveIn)
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      RDXShadowSlot += 8;
    // Emit the saves if needed.
    if (IsRCXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RCXShadowSlot)
          .addReg(X86::RCX);
    if (IsRDXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RDXShadowSlot)
          .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg)
      .addImm(X86::COND_B);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JCC_1))
      .addMBB(ContinueMBB)
      .addImm(X86::COND_AE);

  // Add code to roundMBB to round the final stack pointer to a page boundary.
  RoundMBB->addLiveIn(FinalReg);
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit, RoundedReg the page-rounded
  // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
  // and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  LoopMBB->addLiveIn(JoinReg);
  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);

  LoopMBB->addLiveIn(RoundedReg);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
      .addMBB(LoopMBB)
      .addImm(X86::COND_NE);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in prolog, restore RDX and RCX.
  if (InProlog) {
    if (RCXShadowSlot) // It means we spilled RCX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RCX),
                   X86::RSP, false, RCXShadowSlot);
    if (RDXShadowSlot) // It means we spilled RDX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RDX),
                   X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to continueMBB to update
  // the stack pointer for real.
  ContinueMBB->addLiveIn(SizeReg);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }
}

void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL,
                                          bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
        .addReg(SP)
        .addReg(AX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}
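
// For illustration, on Win64 this expansion, together with the %eax setup
// done by the caller (emitPrologue below), yields for a 16 KiB frame
// (a sketch):
//   movl  $16384, %eax
//   callq __chkstk        ; probes the pages but leaves %rsp unchanged
//   subq  %rax, %rsp      ; the prologue adjusts %rsp itself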
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}
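
// For illustration: calculateSetFPREG(40) == 32 (rounded down to 16-byte
// alignment), and any adjustment of 128 or more is clamped to 128, keeping
// the UWOP_SET_FPREG offset within the chosen limit.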
// If we're forcing a stack realignment we can't rely on just the frame
// info; we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  if (MF.getFunction().hasFnAttribute("stackrealign")) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = Align(SlotSize);
  }
  return MaxAlign.value();
}

void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // We want to make sure that (in the worst case) less than StackProbeSize
  // bytes remain unprobed after the AND. This assumption is used in
  // emitStackProbeInlineGeneric.
  if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
    {
      NumFrameLoopProbe++;
      MachineBasicBlock *entryMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *headMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *bodyMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *footMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());

      MachineFunction::iterator MBBIter = MBB.getIterator();
      MF.insert(MBBIter, entryMBB);
      MF.insert(MBBIter, headMBB);
      MF.insert(MBBIter, bodyMBB);
      MF.insert(MBBIter, footMBB);
      const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
      Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D;

      // Setup entry block
      {
        entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
        BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        MachineInstr *MI =
            BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
                .addReg(FinalStackProbed)
                .addImm(Val)
                .setMIFlag(MachineInstr::FrameSetup);

        // The EFLAGS implicit def is dead.
        MI->getOperand(3).setIsDead();

        BuildMI(entryMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
            .addMBB(&MBB)
            .addImm(X86::COND_E)
            .setMIFlag(MachineInstr::FrameSetup);
        entryMBB->addSuccessor(headMBB);
        entryMBB->addSuccessor(&MBB);
      }

      // Loop entry block
      {
        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        BuildMI(headMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(headMBB, DL, TII.get(X86::JCC_1))
            .addMBB(footMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);

        headMBB->addSuccessor(bodyMBB);
        headMBB->addSuccessor(footMBB);
      }

      // setup loop body
      {
        addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);

        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        // cmp with stack pointer bound
        BuildMI(bodyMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
            .addMBB(bodyMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);
        bodyMBB->addSuccessor(bodyMBB);
        bodyMBB->addSuccessor(footMBB);
      }

      // setup loop footer
      {
        BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
            .addReg(FinalStackProbed)
            .setMIFlag(MachineInstr::FrameSetup);
        addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
        footMBB->addSuccessor(&MBB);
      }

      recomputeLiveIns(*headMBB);
      recomputeLiveIns(*bodyMBB);
      recomputeLiveIns(*footMBB);
      recomputeLiveIns(MBB);
    }
  } else {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                           .addReg(Reg)
                           .addImm(Val)
                           .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
}

bool X86FrameLowering::has128ByteRedZone(const MachineFunction &MF) const {
  // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
  // clobbered by any interrupt handler.
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  const Function &Fn = MF.getFunction();
  const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
  return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
}
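
// For illustration: in a red-zone-eligible leaf function, emitPrologue
// (below) can shrink or even drop the SP adjustment, since up to 128 bytes
// below %rsp are guaranteed untouched by signal and interrupt handlers and
// locals can be addressed as -8(%rsp), -16(%rsp), and so on.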
bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
}

bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
  return !isWin64Prologue(MF) && MF.needsFrameMoves();
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov  %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
          .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and  $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov  $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub  %rax, %rsp
  [else]
      sub  $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note that while only Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
          ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov  %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cv_fpo directives are emitted on win32 when emitting CodeView
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &Fn = MF.getFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
  uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
  bool IsFunclet = MBB.isEHFuncletEntry();
  EHPersonality Personality = EHPersonality::Unknown;
  if (Fn.hasPersonalityFn())
    Personality = classifyEHPersonality(Fn.getPersonalityFn());
  bool FnHasClrFunclet =
      MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
  bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = isWin64Prologue(MF);
  bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
  // FIXME: Emit FPO data for EH funclets.
  bool NeedsWinFPO =
      !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
  bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
  bool NeedsDwarfCFI = needsDwarfCFI(MF);
  Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  Register BasePtr = TRI->getBaseRegister();
  bool HasWinCFI = false;

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // Space reserved for stack-based arguments when making an (ABI-guaranteed)
  // tail call.
  unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
  if (TailCallArgReserveSize && IsWin64Prologue)
    report_fatal_error("Can't handle guaranteed tail call under win64 yet");

  const bool EmitStackProbeCall =
      STI.getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);

  if (HasFP && X86FI->hasSwiftAsyncContext()) {
    BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
        .addUse(MachineFramePtr)
        .addImm(60)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Re-align the stack on 64-bit if the x86-interrupt calling convention is
  // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
  // stack alignment.
  if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
      Fn.arg_size() == 2) {
    StackSize += 8;
    MFI.setStackSize(StackSize);
    emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
  }

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, use up to 128 bytes of stack space, and don't have a frame
  // pointer, calls, or dynamic alloca, then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
      !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
      !MFI.adjustsStack() &&                   // No calls.
      !EmitStackProbeCall &&                   // No stack probes.
      !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
      !MF.shouldSplitStack()) {                // Regular stack
    uint64_t MinSize =
        X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
    if (HasFP)
      MinSize += SlotSize;
    X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI.setStackSize(StackSize);
  }
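
  // For illustration (a worked example with assumed values): with
  // StackSize = 200, no callee saves, and no frame pointer, MinSize is 0 and
  // the allocation shrinks to max(0, 200 - 128) = 72 bytes; the other 128
  // bytes of locals live in the red zone.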

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallArgReserveSize != 0) {
    BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  // Find the funclet establisher parameter
  Register Establisher = X86::NoRegister;
  if (IsClrFunclet)
    Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
  else if (IsFunclet)
    Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;

  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    // Immediately spill establisher into the home slot.
    // The runtime cares about this.
    // MOV64mr %rdx, 16(%rsp)
    unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
        .addReg(Establisher)
        .setMIFlag(MachineInstr::FrameSetup);
    MBB.addLiveIn(Establisher);
  }

  if (HasFP) {
    assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes = FrameSize -
               (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);

    // Callee-saved registers are pushed on stack before the stack is
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(NumBytes, MaxAlign);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MachineFramePtr, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfFramePtr,
                                              2 * stackGrowth));
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsFunclet) {
      if (X86FI->hasSwiftAsyncContext()) {
        const auto &Attrs = MF.getFunction().getAttributes();

        // Before we update the live frame pointer we have to ensure there's a
        // valid (or null) asynchronous context in its slot just before FP in
        // the frame record, so store it now.
        if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
          // We have an initial context in r14, store it just before the frame
          // pointer.
          MBB.addLiveIn(X86::R14);
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
              .addReg(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        } else {
          // No initial context, store null so that there's no pointer that
          // could be misused.
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8))
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        if (NeedsWinCFI) {
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
              .addImm(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr)
            .addUse(X86::RSP)
            .addImm(1)
            .addUse(X86::NoRegister)
            .addImm(8)
            .addUse(X86::NoRegister)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP)
            .addUse(X86::RSP)
            .addImm(8)
            .setMIFlag(MachineInstr::FrameSetup);
      }

      if (!IsWin64Prologue && !IsFunclet) {
        // Update EBP with the new base value.
        if (!X86FI->hasSwiftAsyncContext())
          BuildMI(MBB, MBBI, DL,
                  TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
                  FramePtr)
              .addReg(StackPtr)
              .setMIFlag(MachineInstr::FrameSetup);

        if (NeedsDwarfCFI) {
          // Mark effective beginning of when frame pointer becomes valid.
          // Define the current CFA to use the EBP/RBP register.
          unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
          BuildCFI(
              MBB, MBBI, DL,
              MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
        }

        if (NeedsWinFPO) {
          // .cv_fpo_setframe $FramePtr
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
              .addImm(FramePtr)
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  } else {
    assert(!IsFunclet && "funclets without FPs not yet implemented");
    NumBytes = StackSize -
               (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
  }

  // Update the offset adjustment, which is mainly used by codeview to
  // translate from ESP to VFRAME relative local variable offsets.
  if (!IsFunclet) {
    if (HasFP && TRI->hasStackRealignment(MF))
      MFI.setOffsetAdjustment(-NumBytes);
    else
      MFI.setOffsetAdjustment(-StackSize);
  }

  // For EH funclets, only allocate enough space for outgoing calls. Save the
  // NumBytes value that we would've used for the parent frame.
  unsigned ParentFrameNumBytes = NumBytes;
  if (IsFunclet)
    NumBytes = getWinEHFuncletFrameSize(MF);

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    Register Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
1590       assert(StackSize);
1591       BuildCFI(MBB, MBBI, DL,
1592                MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset));
1593       StackOffset += stackGrowth;
1594     }
1595
1596     if (NeedsWinCFI) {
1597       HasWinCFI = true;
1598       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1599           .addImm(Reg)
1600           .setMIFlag(MachineInstr::FrameSetup);
1601     }
1602   }
1603
1604   // Realign stack after we pushed callee-saved registers (so that we'll be
1605   // able to calculate their offsets from the frame pointer).
1606   // Don't do this for Win64, it needs to realign the stack after the prologue.
1607   if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) {
1608     assert(HasFP && "There should be a frame pointer if stack is realigned.");
1609     BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1610
1611     if (NeedsWinCFI) {
1612       HasWinCFI = true;
1613       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
1614           .addImm(MaxAlign)
1615           .setMIFlag(MachineInstr::FrameSetup);
1616     }
1617   }
1618
1619   // If there is a SUB32ri of ESP immediately before this instruction, merge
1620   // the two. This can be the case when tail call elimination is enabled and
1621   // the callee has more arguments than the caller.
1622   NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1623
1624   // Adjust stack pointer: ESP -= numbytes.
1625
1626   // Windows and cygwin/mingw require a prologue helper routine when allocating
1627   // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
1628   // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
1629   // stack and adjust the stack pointer in one go. The 64-bit version of
1630   // __chkstk is only responsible for probing the stack. The 64-bit prologue is
1631   // responsible for adjusting the stack pointer. Touching the stack at 4K
1632   // increments is necessary to ensure that the guard pages used by the OS
1633   // virtual memory manager are allocated in correct sequence.
1634   uint64_t AlignedNumBytes = NumBytes;
1635   if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
1636     AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1637   if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1638     assert(!X86FI->getUsesRedZone() &&
1639            "The Red Zone is not accounted for in stack probes");
1640
1641     // Check whether EAX is live-in for this block.
1642     bool isEAXAlive = isEAXLiveIn(MBB);
1643
1644     if (isEAXAlive) {
1645       if (Is64Bit) {
1646         // Save RAX
1647         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1648             .addReg(X86::RAX, RegState::Kill)
1649             .setMIFlag(MachineInstr::FrameSetup);
1650       } else {
1651         // Save EAX
1652         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1653             .addReg(X86::EAX, RegState::Kill)
1654             .setMIFlag(MachineInstr::FrameSetup);
1655       }
1656     }
1657
1658     if (Is64Bit) {
1659       // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1660       // Function prologue is responsible for adjusting the stack pointer.
1661       int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1662       if (isUInt<32>(Alloc)) {
1663         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1664             .addImm(Alloc)
1665             .setMIFlag(MachineInstr::FrameSetup);
1666       } else if (isInt<32>(Alloc)) {
1667         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
1668             .addImm(Alloc)
1669             .setMIFlag(MachineInstr::FrameSetup);
1670       } else {
1671         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
1672             .addImm(Alloc)
1673             .setMIFlag(MachineInstr::FrameSetup);
1674       }
1675     } else {
1676       // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
1677       // We'll also use 4 already allocated bytes for EAX.
1678       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1679           .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1680           .setMIFlag(MachineInstr::FrameSetup);
1681     }
1682
1683     // Call __chkstk, __chkstk_ms, or __alloca.
1684     emitStackProbe(MF, MBB, MBBI, DL, true);
1685
1686     if (isEAXAlive) {
1687       // Restore RAX/EAX
1688       MachineInstr *MI;
1689       if (Is64Bit)
1690         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
1691                           StackPtr, false, NumBytes - 8);
1692       else
1693         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1694                           StackPtr, false, NumBytes - 4);
1695       MI->setFlag(MachineInstr::FrameSetup);
1696       MBB.insert(MBBI, MI);
1697     }
1698   } else if (NumBytes) {
1699     emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
1700   }
1701
1702   if (NeedsWinCFI && NumBytes) {
1703     HasWinCFI = true;
1704     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1705         .addImm(NumBytes)
1706         .setMIFlag(MachineInstr::FrameSetup);
1707   }
1708
1709   int SEHFrameOffset = 0;
1710   unsigned SPOrEstablisher;
1711   if (IsFunclet) {
1712     if (IsClrFunclet) {
1713       // The establisher parameter passed to a CLR funclet is actually a pointer
1714       // to the (mostly empty) frame of its nearest enclosing funclet; we have
1715       // to find the root function establisher frame by loading the PSPSym from
1716       // the intermediate frame.
1717       unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1718       MachinePointerInfo NoInfo;
1719       MBB.addLiveIn(Establisher);
1720       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
1721                    Establisher, false, PSPSlotOffset)
1722           .addMemOperand(MF.getMachineMemOperand(
1723               NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
1724
1725       // Save the root establisher back into the current funclet's (mostly
1726       // empty) frame, in case a sub-funclet or the GC needs it.
1727       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
1728                    false, PSPSlotOffset)
1729           .addReg(Establisher)
1730           .addMemOperand(MF.getMachineMemOperand(
1731               NoInfo,
1732               MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1733               SlotSize, Align(SlotSize)));
1734     }
1735     SPOrEstablisher = Establisher;
1736   } else {
1737     SPOrEstablisher = StackPtr;
1738   }
1739
1740   if (IsWin64Prologue && HasFP) {
1741     // Set RBP to a small fixed offset from RSP. In the funclet case, we base
1742     // this calculation on the incoming establisher, which holds the value of
1743     // RSP from the parent frame at the end of the prologue.
1744     SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
1745     if (SEHFrameOffset)
1746       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
1747                    SPOrEstablisher, false, SEHFrameOffset);
1748     else
1749       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
1750           .addReg(SPOrEstablisher);
1751
1752     // If this is not a funclet, emit the CFI describing our frame pointer.
1753     if (NeedsWinCFI && !IsFunclet) {
1754       assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
1755       HasWinCFI = true;
1756       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1757           .addImm(FramePtr)
1758           .addImm(SEHFrameOffset)
1759           .setMIFlag(MachineInstr::FrameSetup);
1760       if (isAsynchronousEHPersonality(Personality))
1761         MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
1762     }
1763   } else if (IsFunclet && STI.is32Bit()) {
1764     // Reset EBP / ESI to something good for funclets.
1765 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL); 1766 // If we're a catch funclet, we can be returned to via catchret. Save ESP 1767 // into the registration node so that the runtime will restore it for us. 1768 if (!MBB.isCleanupFuncletEntry()) { 1769 assert(Personality == EHPersonality::MSVC_CXX); 1770 Register FrameReg; 1771 int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex; 1772 int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed(); 1773 // ESP is the first field, so no extra displacement is needed. 1774 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg, 1775 false, EHRegOffset) 1776 .addReg(X86::ESP); 1777 } 1778 } 1779 1780 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) { 1781 const MachineInstr &FrameInstr = *MBBI; 1782 ++MBBI; 1783 1784 if (NeedsWinCFI) { 1785 int FI; 1786 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) { 1787 if (X86::FR64RegClass.contains(Reg)) { 1788 int Offset; 1789 Register IgnoredFrameReg; 1790 if (IsWin64Prologue && IsFunclet) 1791 Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg); 1792 else 1793 Offset = 1794 getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() + 1795 SEHFrameOffset; 1796 1797 HasWinCFI = true; 1798 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data"); 1799 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM)) 1800 .addImm(Reg) 1801 .addImm(Offset) 1802 .setMIFlag(MachineInstr::FrameSetup); 1803 } 1804 } 1805 } 1806 } 1807 1808 if (NeedsWinCFI && HasWinCFI) 1809 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue)) 1810 .setMIFlag(MachineInstr::FrameSetup); 1811 1812 if (FnHasClrFunclet && !IsFunclet) { 1813 // Save the so-called Initial-SP (i.e. the value of the stack pointer 1814 // immediately after the prolog) into the PSPSlot so that funclets 1815 // and the GC can recover it. 1816 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF); 1817 auto PSPInfo = MachinePointerInfo::getFixedStack( 1818 MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx); 1819 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false, 1820 PSPSlotOffset) 1821 .addReg(StackPtr) 1822 .addMemOperand(MF.getMachineMemOperand( 1823 PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, 1824 SlotSize, Align(SlotSize))); 1825 } 1826 1827 // Realign stack after we spilled callee-saved registers (so that we'll be 1828 // able to calculate their offsets from the frame pointer). 1829 // Win64 requires aligning the stack after the prologue. 1830 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) { 1831 assert(HasFP && "There should be a frame pointer if stack is realigned."); 1832 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign); 1833 } 1834 1835 // We already dealt with stack realignment and funclets above. 1836 if (IsFunclet && STI.is32Bit()) 1837 return; 1838 1839 // If we need a base pointer, set it up here. It's whatever the value 1840 // of the stack pointer is at this point. Any variable size objects 1841 // will be allocated after this, so we can still use the base pointer 1842 // to reference locals. 1843 if (TRI->hasBasePointer(MF)) { 1844 // Update the base pointer with the current stack pointer. 1845 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr; 1846 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr) 1847 .addReg(SPOrEstablisher) 1848 .setMIFlag(MachineInstr::FrameSetup); 1849 if (X86FI->getRestoreBasePointer()) { 1850 // Stash value of base pointer. Saving RSP instead of EBP shortens 1851 // dependence chain. 
Used by SjLj EH. 1852 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; 1853 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), 1854 FramePtr, true, X86FI->getRestoreBasePointerOffset()) 1855 .addReg(SPOrEstablisher) 1856 .setMIFlag(MachineInstr::FrameSetup); 1857 } 1858 1859 if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) { 1860 // Stash the value of the frame pointer relative to the base pointer for 1861 // Win32 EH. This supports Win32 EH, which does the inverse of the above: 1862 // it recovers the frame pointer from the base pointer rather than the 1863 // other way around. 1864 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; 1865 Register UsedReg; 1866 int Offset = 1867 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg) 1868 .getFixed(); 1869 assert(UsedReg == BasePtr); 1870 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset) 1871 .addReg(FramePtr) 1872 .setMIFlag(MachineInstr::FrameSetup); 1873 } 1874 } 1875 1876 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) { 1877 // Mark end of stack pointer adjustment. 1878 if (!HasFP && NumBytes) { 1879 // Define the current CFA rule to use the provided offset. 1880 assert(StackSize); 1881 BuildCFI( 1882 MBB, MBBI, DL, 1883 MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth)); 1884 } 1885 1886 // Emit DWARF info specifying the offsets of the callee-saved registers. 1887 emitCalleeSavedFrameMoves(MBB, MBBI, DL, true); 1888 } 1889 1890 // X86 Interrupt handling function cannot assume anything about the direction 1891 // flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction 1892 // in each prologue of interrupt handler function. 1893 // 1894 // FIXME: Create "cld" instruction only in these cases: 1895 // 1. The interrupt handling function uses any of the "rep" instructions. 1896 // 2. Interrupt handling function calls another function. 1897 // 1898 if (Fn.getCallingConv() == CallingConv::X86_INTR) 1899 BuildMI(MBB, MBBI, DL, TII.get(X86::CLD)) 1900 .setMIFlag(MachineInstr::FrameSetup); 1901 1902 // At this point we know if the function has WinCFI or not. 1903 MF.setHasWinCFI(HasWinCFI); 1904 } 1905 1906 bool X86FrameLowering::canUseLEAForSPInEpilogue( 1907 const MachineFunction &MF) const { 1908 // We can't use LEA instructions for adjusting the stack pointer if we don't 1909 // have a frame pointer in the Win64 ABI. Only ADD instructions may be used 1910 // to deallocate the stack. 1911 // This means that we can use LEA for SP in two situations: 1912 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA. 1913 // 2. We *have* a frame pointer which means we are permitted to use LEA. 1914 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF); 1915 } 1916 1917 static bool isFuncletReturnInstr(MachineInstr &MI) { 1918 switch (MI.getOpcode()) { 1919 case X86::CATCHRET: 1920 case X86::CLEANUPRET: 1921 return true; 1922 default: 1923 return false; 1924 } 1925 llvm_unreachable("impossible"); 1926 } 1927 1928 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the 1929 // stack. It holds a pointer to the bottom of the root function frame. The 1930 // establisher frame pointer passed to a nested funclet may point to the 1931 // (mostly empty) frame of its parent funclet, but it will need to find 1932 // the frame of the root function to access locals. 
To facilitate this, 1933 // every funclet copies the pointer to the bottom of the root function 1934 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the 1935 // same offset for the PSPSym in the root function frame that's used in the 1936 // funclets' frames allows each funclet to dynamically accept any ancestor 1937 // frame as its establisher argument (the runtime doesn't guarantee the 1938 // immediate parent for some reason lost to history), and also allows the GC, 1939 // which uses the PSPSym for some bookkeeping, to find it in any funclet's 1940 // frame with only a single offset reported for the entire method. 1941 unsigned 1942 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const { 1943 const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo(); 1944 Register SPReg; 1945 int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg, 1946 /*IgnoreSPUpdates*/ true) 1947 .getFixed(); 1948 assert(Offset >= 0 && SPReg == TRI->getStackRegister()); 1949 return static_cast<unsigned>(Offset); 1950 } 1951 1952 unsigned 1953 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const { 1954 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1955 // This is the size of the pushed CSRs. 1956 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 1957 // This is the size of callee saved XMMs. 1958 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 1959 unsigned XMMSize = WinEHXMMSlotInfo.size() * 1960 TRI->getSpillSize(X86::VR128RegClass); 1961 // This is the amount of stack a funclet needs to allocate. 1962 unsigned UsedSize; 1963 EHPersonality Personality = 1964 classifyEHPersonality(MF.getFunction().getPersonalityFn()); 1965 if (Personality == EHPersonality::CoreCLR) { 1966 // CLR funclets need to hold enough space to include the PSPSym, at the 1967 // same offset from the stack pointer (immediately after the prolog) as it 1968 // resides at in the main function. 1969 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize; 1970 } else { 1971 // Other funclets just need enough stack for outgoing call arguments. 1972 UsedSize = MF.getFrameInfo().getMaxCallFrameSize(); 1973 } 1974 // RBP is not included in the callee saved register block. After pushing RBP, 1975 // everything is 16 byte aligned. Everything we allocate before an outgoing 1976 // call must also be 16 byte aligned. 1977 unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign()); 1978 // Subtract out the size of the callee saved registers. This is how much stack 1979 // each funclet will allocate. 1980 return FrameSizeMinusRBP + XMMSize - CSSize; 1981 } 1982 1983 static bool isTailCallOpcode(unsigned Opc) { 1984 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi || 1985 Opc == X86::TCRETURNmi || 1986 Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 || 1987 Opc == X86::TCRETURNmi64; 1988 } 1989 1990 void X86FrameLowering::emitEpilogue(MachineFunction &MF, 1991 MachineBasicBlock &MBB) const { 1992 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1993 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1994 MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator(); 1995 MachineBasicBlock::iterator MBBI = Terminator; 1996 DebugLoc DL; 1997 if (MBBI != MBB.end()) 1998 DL = MBBI->getDebugLoc(); 1999 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit. 
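  // (Editorial note: on x32, i.e. 64-bit ILP32, TRI reports a 32-bit frame
  // register, but pushes and pops always operate on the full 64-bit register,
  // so MachineFramePtr below is widened with getX86SubSuperRegister.)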
2000 const bool Is64BitILP32 = STI.isTarget64BitILP32(); 2001 Register FramePtr = TRI->getFrameRegister(MF); 2002 Register MachineFramePtr = 2003 Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr; 2004 2005 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 2006 bool NeedsWin64CFI = 2007 IsWin64Prologue && MF.getFunction().needsUnwindTableEntry(); 2008 bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI); 2009 2010 // Get the number of bytes to allocate from the FrameInfo. 2011 uint64_t StackSize = MFI.getStackSize(); 2012 uint64_t MaxAlign = calculateMaxStackAlign(MF); 2013 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 2014 unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta(); 2015 bool HasFP = hasFP(MF); 2016 uint64_t NumBytes = 0; 2017 2018 bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() && 2019 !MF.getTarget().getTargetTriple().isOSWindows()) && 2020 MF.needsFrameMoves(); 2021 2022 if (IsFunclet) { 2023 assert(HasFP && "EH funclets without FP not yet implemented"); 2024 NumBytes = getWinEHFuncletFrameSize(MF); 2025 } else if (HasFP) { 2026 // Calculate required stack adjustment. 2027 uint64_t FrameSize = StackSize - SlotSize; 2028 NumBytes = FrameSize - CSSize - TailCallArgReserveSize; 2029 2030 // Callee-saved registers were pushed on stack before the stack was 2031 // realigned. 2032 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue) 2033 NumBytes = alignTo(FrameSize, MaxAlign); 2034 } else { 2035 NumBytes = StackSize - CSSize - TailCallArgReserveSize; 2036 } 2037 uint64_t SEHStackAllocAmt = NumBytes; 2038 2039 // AfterPop is the position to insert .cfi_restore. 2040 MachineBasicBlock::iterator AfterPop = MBBI; 2041 if (HasFP) { 2042 if (X86FI->hasSwiftAsyncContext()) { 2043 // Discard the context. 2044 int Offset = 16 + mergeSPUpdates(MBB, MBBI, true); 2045 emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/true); 2046 } 2047 // Pop EBP. 2048 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), 2049 MachineFramePtr) 2050 .setMIFlag(MachineInstr::FrameDestroy); 2051 2052 // We need to reset FP to its untagged state on return. Bit 60 is currently 2053 // used to show the presence of an extended frame. 2054 if (X86FI->hasSwiftAsyncContext()) { 2055 BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), 2056 MachineFramePtr) 2057 .addUse(MachineFramePtr) 2058 .addImm(60) 2059 .setMIFlag(MachineInstr::FrameDestroy); 2060 } 2061 2062 if (NeedsDwarfCFI) { 2063 unsigned DwarfStackPtr = 2064 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true); 2065 BuildCFI(MBB, MBBI, DL, 2066 MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize)); 2067 if (!MBB.succ_empty() && !MBB.isReturnBlock()) { 2068 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); 2069 BuildCFI(MBB, AfterPop, DL, 2070 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr)); 2071 --MBBI; 2072 --AfterPop; 2073 } 2074 --MBBI; 2075 } 2076 } 2077 2078 MachineBasicBlock::iterator FirstCSPop = MBBI; 2079 // Skip the callee-saved pop instructions. 
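  // Illustrative shape of the code this scan expects (registers vary, not
  // emitted here):
  //   popq %r14    ; FrameDestroy
  //   popq %rbx    ; FrameDestroy
  //   retq
  // The loop walks backwards over such FrameDestroy pops (and the swift-async
  // BTR/ADD forms) to find the first callee-saved restore.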
2080   while (MBBI != MBB.begin()) {
2081     MachineBasicBlock::iterator PI = std::prev(MBBI);
2082     unsigned Opc = PI->getOpcode();
2083
2084     if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2085       if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2086           (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2087           (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2088           (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
2089         break;
2090       FirstCSPop = PI;
2091     }
2092
2093     --MBBI;
2094   }
2095   MBBI = FirstCSPop;
2096
2097   if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2098     emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
2099
2100   if (MBBI != MBB.end())
2101     DL = MBBI->getDebugLoc();
2102   // If there is an ADD32ri or SUB32ri of ESP immediately before this
2103   // instruction, merge the two instructions.
2104   if (NumBytes || MFI.hasVarSizedObjects())
2105     NumBytes += mergeSPUpdates(MBB, MBBI, true);
2106
2107   // If dynamic allocas are used, reset ESP to point to the last callee-saved
2108   // slot before popping the CSRs off. The same applies when the stack was
2109   // realigned. Don't do this for a funclet epilogue, since funclets do not
2110   // perform realignment or dynamic stack allocation.
2111   if (((TRI->hasStackRealignment(MF)) || MFI.hasVarSizedObjects()) &&
2112       !IsFunclet) {
2113     if (TRI->hasStackRealignment(MF))
2114       MBBI = FirstCSPop;
2115     unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
2116     uint64_t LEAAmount =
2117         IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2118
2119     if (X86FI->hasSwiftAsyncContext())
2120       LEAAmount -= 16;
2121
2122     // There are only two legal forms of epilogue:
2123     // - add SEHAllocationSize, %rsp
2124     // - lea SEHAllocationSize(%FramePtr), %rsp
2125     //
2126     // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
2127     // However, we may use this sequence if we have a frame pointer because the
2128     // effects of the prologue can safely be undone.
2129     if (LEAAmount != 0) {
2130       unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
2131       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
2132                    FramePtr, false, LEAAmount);
2133       --MBBI;
2134     } else {
2135       unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
2136       BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
2137           .addReg(FramePtr);
2138       --MBBI;
2139     }
2140   } else if (NumBytes) {
2141     // Adjust stack pointer back: ESP += numbytes.
2142     emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
2143     if (!hasFP(MF) && NeedsDwarfCFI) {
2144       // Define the current CFA rule to use the provided offset.
2145       BuildCFI(MBB, MBBI, DL,
2146                MCCFIInstruction::cfiDefCfaOffset(
2147                    nullptr, CSSize + TailCallArgReserveSize + SlotSize));
2148     }
2149     --MBBI;
2150   }
2151
2152   // The Windows unwinder will not invoke a function's exception handler if
2153   // the IP is either in the prologue or in the epilogue. This behavior causes
2154   // a problem when a call immediately precedes an epilogue, because the return
2155   // address points into the epilogue. To cope with that, we insert an epilogue
2156   // marker here, then replace it with a 'nop' if it ends up immediately after
2157   // a CALL in the final emitted code.
2158   if (NeedsWin64CFI && MF.hasWinCFI())
2159     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
2160
2161   if (!hasFP(MF) && NeedsDwarfCFI) {
2162     MBBI = FirstCSPop;
2163     int64_t Offset = -CSSize - SlotSize;
2164     // Mark callee-saved pop instruction.
2165     // Define the current CFA rule to use the provided offset.
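    // Worked example (illustrative): with two pushed GPR CSRs (CSSize == 16)
    // and SlotSize == 8, Offset starts at -24; the first pop then emits
    // .cfi_def_cfa_offset 16 and the second .cfi_def_cfa_offset 8, leaving
    // only the return address between RSP and the CFA.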
2166     while (MBBI != MBB.end()) {
2167       MachineBasicBlock::iterator PI = MBBI;
2168       unsigned Opc = PI->getOpcode();
2169       ++MBBI;
2170       if (Opc == X86::POP32r || Opc == X86::POP64r) {
2171         Offset += SlotSize;
2172         BuildCFI(MBB, MBBI, DL,
2173                  MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
2174       }
2175     }
2176   }
2177
2178   // Emit DWARF info specifying the restores of the callee-saved registers.
2179   // If the epilogue block returns, or has no successors at all, there is no
2180   // need to generate .cfi_restore for the callee-saved registers.
2181   if (NeedsDwarfCFI && !MBB.succ_empty() && !MBB.isReturnBlock()) {
2182     emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
2183   }
2184
2185   if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
2186     // Add the return addr area delta back since we are not tail calling.
2187     int Offset = -1 * X86FI->getTCReturnAddrDelta();
2188     assert(Offset >= 0 && "TCDelta should never be positive");
2189     if (Offset) {
2190       // Check for possible merge with preceding ADD instruction.
2191       Offset += mergeSPUpdates(MBB, Terminator, true);
2192       emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
2193     }
2194   }
2195
2196   // Emit tilerelease for AMX kernel.
2197   const MachineRegisterInfo &MRI = MF.getRegInfo();
2198   const TargetRegisterClass *RC = TRI->getRegClass(X86::TILERegClassID);
2199   for (unsigned I = 0; I < RC->getNumRegs(); I++)
2200     if (!MRI.reg_nodbg_empty(X86::TMM0 + I)) {
2201       BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
2202       break;
2203     }
2204 }
2205
2206 StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
2207                                                      int FI,
2208                                                      Register &FrameReg) const {
2209   const MachineFrameInfo &MFI = MF.getFrameInfo();
2210
2211   bool IsFixed = MFI.isFixedObjectIndex(FI);
2212   // We can't calculate offset from frame pointer if the stack is realigned,
2213   // so enforce usage of stack/base pointer. The base pointer is used when we
2214   // have dynamic allocas in addition to dynamic realignment.
2215   if (TRI->hasBasePointer(MF))
2216     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
2217   else if (TRI->hasStackRealignment(MF))
2218     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
2219   else
2220     FrameReg = TRI->getFrameRegister(MF);
2221
2222   // Offset will hold the offset from the stack pointer at function entry to the
2223   // object.
2224   // We need to factor in additional offsets applied during the prologue to the
2225   // frame, base, and stack pointer depending on which is used.
2226   int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
2227   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2228   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2229   uint64_t StackSize = MFI.getStackSize();
2230   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2231   int64_t FPDelta = 0;
2232
2233   // In an x86 interrupt, remove the offset we added to account for the return
2234   // address from any stack object allocated in the caller's frame. Interrupts
2235   // do not have a standard return address. Fixed objects in the current frame,
2236   // such as SSE register spills, should not get this treatment.
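  // Worked example (illustrative): on x86-64, getOffsetOfLocalArea() is -8,
  // so a caller-frame object with an MFI offset of 0 holds Offset == 8 here;
  // adding getOffsetOfLocalArea() back below cancels that return-address bias.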
2237 if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR && 2238 Offset >= 0) { 2239 Offset += getOffsetOfLocalArea(); 2240 } 2241 2242 if (IsWin64Prologue) { 2243 assert(!MFI.hasCalls() || (StackSize % 16) == 8); 2244 2245 // Calculate required stack adjustment. 2246 uint64_t FrameSize = StackSize - SlotSize; 2247 // If required, include space for extra hidden slot for stashing base pointer. 2248 if (X86FI->getRestoreBasePointer()) 2249 FrameSize += SlotSize; 2250 uint64_t NumBytes = FrameSize - CSSize; 2251 2252 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes); 2253 if (FI && FI == X86FI->getFAIndex()) 2254 return StackOffset::getFixed(-SEHFrameOffset); 2255 2256 // FPDelta is the offset from the "traditional" FP location of the old base 2257 // pointer followed by return address and the location required by the 2258 // restricted Win64 prologue. 2259 // Add FPDelta to all offsets below that go through the frame pointer. 2260 FPDelta = FrameSize - SEHFrameOffset; 2261 assert((!MFI.hasCalls() || (FPDelta % 16) == 0) && 2262 "FPDelta isn't aligned per the Win64 ABI!"); 2263 } 2264 2265 if (FrameReg == TRI->getFramePtr()) { 2266 // Skip saved EBP/RBP 2267 Offset += SlotSize; 2268 2269 // Account for restricted Windows prologue. 2270 Offset += FPDelta; 2271 2272 // Skip the RETADDR move area 2273 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 2274 if (TailCallReturnAddrDelta < 0) 2275 Offset -= TailCallReturnAddrDelta; 2276 2277 return StackOffset::getFixed(Offset); 2278 } 2279 2280 // FrameReg is either the stack pointer or a base pointer. But the base is 2281 // located at the end of the statically known StackSize so the distinction 2282 // doesn't really matter. 2283 if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF)) 2284 assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize))); 2285 return StackOffset::getFixed(Offset + StackSize); 2286 } 2287 2288 int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, 2289 Register &FrameReg) const { 2290 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2291 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2292 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 2293 const auto it = WinEHXMMSlotInfo.find(FI); 2294 2295 if (it == WinEHXMMSlotInfo.end()) 2296 return getFrameIndexReference(MF, FI, FrameReg).getFixed(); 2297 2298 FrameReg = TRI->getStackRegister(); 2299 return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) + 2300 it->second; 2301 } 2302 2303 StackOffset 2304 X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI, 2305 Register &FrameReg, 2306 int Adjustment) const { 2307 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2308 FrameReg = TRI->getStackRegister(); 2309 return StackOffset::getFixed(MFI.getObjectOffset(FI) - 2310 getOffsetOfLocalArea() + Adjustment); 2311 } 2312 2313 StackOffset 2314 X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF, 2315 int FI, Register &FrameReg, 2316 bool IgnoreSPUpdates) const { 2317 2318 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2319 // Does not include any dynamic realign. 2320 const uint64_t StackSize = MFI.getStackSize(); 2321 // LLVM arranges the stack as follows: 2322 // ... 2323 // ARG2 2324 // ARG1 2325 // RETADDR 2326 // PUSH RBP <-- RBP points here 2327 // PUSH CSRs 2328 // ~~~~~~~ <-- possible stack realignment (non-win64) 2329 // ... 2330 // STACK OBJECTS 2331 // ... 
  //                                        <-- RSP after prologue points here
2332   //  ~~~~~~~                            <-- possible stack realignment (win64)
2333   //
2334   //  if (hasVarSizedObjects()):
2335   //    ...                                <-- "base pointer" (ESI/RBX) points here
2336   //    DYNAMIC ALLOCAS
2337   //    ...                                <-- RSP points here
2338   //
2339   //  Case 1: In the simple case of no stack realignment and no dynamic
2340   //  allocas, both "fixed" stack objects (arguments and CSRs) are addressable
2341   //  with fixed offsets from RSP.
2342   //
2343   //  Case 2: In the case of stack realignment with no dynamic allocas, fixed
2344   //  stack objects are addressed with RBP and regular stack objects with RSP.
2345   //
2346   //  Case 3: In the case of dynamic allocas and stack realignment, RSP is used
2347   //  to address stack arguments for outgoing calls and nothing else. The "base
2348   //  pointer" points to local variables, and RBP points to fixed objects.
2349   //
2350   //  In cases 2 and 3, we can only answer for non-fixed stack objects, and the
2351   //  answer we give is relative to the SP after the prologue, and not the
2352   //  SP in the middle of the function.
2353
2354   if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
2355       !STI.isTargetWin64())
2356     return getFrameIndexReference(MF, FI, FrameReg);
2357
2358   // If !hasReservedCallFrame the function might have SP adjustment in the
2359   // body. So, even though the offset is statically known, it depends on where
2360   // we are in the function.
2361   if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
2362     return getFrameIndexReference(MF, FI, FrameReg);
2363
2364   // We don't handle tail calls, and shouldn't be seeing them either.
2365   assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
2366          "we don't handle this case!");
2367
2368   // This is how the math works out:
2369   //
2370   //  %rsp grows (i.e. gets lower) left to right. Each box below is
2371   //  one word (eight bytes). Obj0 is the stack slot we're trying to
2372   //  get to.
2373   //
2374   //    ----------------------------------
2375   //    | BP | Obj0 | Obj1 | ... | ObjN |
2376   //    ----------------------------------
2377   //    ^    ^      ^                  ^
2378   //    A    B      C                  E
2379   //
2380   // A is the incoming stack pointer.
2381   // (B - A) is the local area offset (-8 for x86-64) [1]
2382   // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
2383   //
2384   // |(E - B)| is the StackSize (absolute value, positive). For a
2385   // stack that grows down, this works out to be (B - E). [3]
2386   //
2387   // E is also the value of %rsp after stack has been set up, and we
2388   // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
2389   // (C - E) == (C - A) - (B - A) + (B - E)
2390   //          { Using [1], [2] and [3] above }
2391   //        == getObjectOffset - LocalAreaOffset + StackSize
2392
2393   return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
2394 }
2395
2396 bool X86FrameLowering::assignCalleeSavedSpillSlots(
2397     MachineFunction &MF, const TargetRegisterInfo *TRI,
2398     std::vector<CalleeSavedInfo> &CSI) const {
2399   MachineFrameInfo &MFI = MF.getFrameInfo();
2400   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2401
2402   unsigned CalleeSavedFrameSize = 0;
2403   unsigned XMMCalleeSavedFrameSize = 0;
2404   auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2405   int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
2406
2407   int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2408
2409   if (TailCallReturnAddrDelta < 0) {
2410     // create RETURNADDR area
2411     //   arg
2412     //   arg
2413     //   RETADDR
2414     //   { ...
2415 // RETADDR area 2416 // ... 2417 // } 2418 // [EBP] 2419 MFI.CreateFixedObject(-TailCallReturnAddrDelta, 2420 TailCallReturnAddrDelta - SlotSize, true); 2421 } 2422 2423 // Spill the BasePtr if it's used. 2424 if (this->TRI->hasBasePointer(MF)) { 2425 // Allocate a spill slot for EBP if we have a base pointer and EH funclets. 2426 if (MF.hasEHFunclets()) { 2427 int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize)); 2428 X86FI->setHasSEHFramePtrSave(true); 2429 X86FI->setSEHFramePtrSaveIndex(FI); 2430 } 2431 } 2432 2433 if (hasFP(MF)) { 2434 // emitPrologue always spills frame register the first thing. 2435 SpillSlotOffset -= SlotSize; 2436 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset); 2437 2438 // The async context lives directly before the frame pointer, and we 2439 // allocate a second slot to preserve stack alignment. 2440 if (X86FI->hasSwiftAsyncContext()) { 2441 SpillSlotOffset -= SlotSize; 2442 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset); 2443 SpillSlotOffset -= SlotSize; 2444 } 2445 2446 // Since emitPrologue and emitEpilogue will handle spilling and restoring of 2447 // the frame register, we can delete it from CSI list and not have to worry 2448 // about avoiding it later. 2449 Register FPReg = TRI->getFrameRegister(MF); 2450 for (unsigned i = 0; i < CSI.size(); ++i) { 2451 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) { 2452 CSI.erase(CSI.begin() + i); 2453 break; 2454 } 2455 } 2456 } 2457 2458 // Assign slots for GPRs. It increases frame size. 2459 for (unsigned i = CSI.size(); i != 0; --i) { 2460 unsigned Reg = CSI[i - 1].getReg(); 2461 2462 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg)) 2463 continue; 2464 2465 SpillSlotOffset -= SlotSize; 2466 CalleeSavedFrameSize += SlotSize; 2467 2468 int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset); 2469 CSI[i - 1].setFrameIdx(SlotIndex); 2470 } 2471 2472 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize); 2473 MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize); 2474 2475 // Assign slots for XMMs. 2476 for (unsigned i = CSI.size(); i != 0; --i) { 2477 unsigned Reg = CSI[i - 1].getReg(); 2478 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg)) 2479 continue; 2480 2481 // If this is k-register make sure we lookup via the largest legal type. 2482 MVT VT = MVT::Other; 2483 if (X86::VK16RegClass.contains(Reg)) 2484 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; 2485 2486 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2487 unsigned Size = TRI->getSpillSize(*RC); 2488 Align Alignment = TRI->getSpillAlign(*RC); 2489 // ensure alignment 2490 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86"); 2491 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment); 2492 2493 // spill into slot 2494 SpillSlotOffset -= Size; 2495 int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset); 2496 CSI[i - 1].setFrameIdx(SlotIndex); 2497 MFI.ensureMaxAlignment(Alignment); 2498 2499 // Save the start offset and size of XMM in stack frame for funclets. 
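    // (These WinEHXMMSlotInfo entries are consumed later by
    // getWinEHFuncletFrameSize and getWin64EHFrameIndexRef when funclets
    // address the parent frame's XMM spill area.)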
2500     if (X86::VR128RegClass.contains(Reg)) {
2501       WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2502       XMMCalleeSavedFrameSize += Size;
2503     }
2504   }
2505
2506   return true;
2507 }
2508
2509 bool X86FrameLowering::spillCalleeSavedRegisters(
2510     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2511     ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2512   DebugLoc DL = MBB.findDebugLoc(MI);
2513
2514   // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
2515   // for us, and there are no XMM CSRs on Win32.
2516   if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
2517     return true;
2518
2519   // Push GPRs. It increases frame size.
2520   const MachineFunction &MF = *MBB.getParent();
2521   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
2522   for (unsigned i = CSI.size(); i != 0; --i) {
2523     unsigned Reg = CSI[i - 1].getReg();
2524
2525     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2526       continue;
2527
2528     const MachineRegisterInfo &MRI = MF.getRegInfo();
2529     bool isLiveIn = MRI.isLiveIn(Reg);
2530     if (!isLiveIn)
2531       MBB.addLiveIn(Reg);
2532
2533     // Decide whether we can add a kill flag to the use.
2534     bool CanKill = !isLiveIn;
2535     // Check if any subregister is live-in
2536     if (CanKill) {
2537       for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
2538         if (MRI.isLiveIn(*AReg)) {
2539           CanKill = false;
2540           break;
2541         }
2542       }
2543     }
2544
2545     // Do not set a kill flag on values that are also marked as live-in. This
2546     // happens with the @llvm.returnaddress intrinsic and with arguments
2547     // passed in callee saved registers.
2548     // Omitting the kill flags is conservatively correct even if the live-in
2549     // is not used after all.
2550     BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
2551         .setMIFlag(MachineInstr::FrameSetup);
2552   }
2553
2554   // Spill XMM regs: x86 has no push/pop instructions for XMM registers, so
2555   // they are stored to their stack-frame slots instead.
2556   for (unsigned i = CSI.size(); i != 0; --i) {
2557     unsigned Reg = CSI[i - 1].getReg();
2558     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2559       continue;
2560
2561     // If this is a k-register make sure we lookup via the largest legal type.
2562     MVT VT = MVT::Other;
2563     if (X86::VK16RegClass.contains(Reg))
2564       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2565
2566     // Add the callee-saved register as live-in. It's killed at the spill.
2567     MBB.addLiveIn(Reg);
2568     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2569
2570     TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
2571                             TRI);
2572     --MI;
2573     MI->setFlag(MachineInstr::FrameSetup);
2574     ++MI;
2575   }
2576
2577   return true;
2578 }
2579
2580 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
2581                                                MachineBasicBlock::iterator MBBI,
2582                                                MachineInstr *CatchRet) const {
2583   // SEH shouldn't use catchret.
2584   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2585              MBB.getParent()->getFunction().getPersonalityFn())) &&
2586          "SEH should not use CATCHRET");
2587   const DebugLoc &DL = CatchRet->getDebugLoc();
2588   MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
2589
2590   // Fill EAX/RAX with the address of the target block.
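  // (For MSVC C++ EH, the contract is that a catch funclet returns the
  // address at which the parent function should resume, so CATCHRET
  // materializes that address in the return register.)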
2591 if (STI.is64Bit()) { 2592 // LEA64r CatchRetTarget(%rip), %rax 2593 BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX) 2594 .addReg(X86::RIP) 2595 .addImm(0) 2596 .addReg(0) 2597 .addMBB(CatchRetTarget) 2598 .addReg(0); 2599 } else { 2600 // MOV32ri $CatchRetTarget, %eax 2601 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) 2602 .addMBB(CatchRetTarget); 2603 } 2604 2605 // Record that we've taken the address of CatchRetTarget and no longer just 2606 // reference it in a terminator. 2607 CatchRetTarget->setHasAddressTaken(); 2608 } 2609 2610 bool X86FrameLowering::restoreCalleeSavedRegisters( 2611 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 2612 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 2613 if (CSI.empty()) 2614 return false; 2615 2616 if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) { 2617 // Don't restore CSRs in 32-bit EH funclets. Matches 2618 // spillCalleeSavedRegisters. 2619 if (STI.is32Bit()) 2620 return true; 2621 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form 2622 // funclets. emitEpilogue transforms these to normal jumps. 2623 if (MI->getOpcode() == X86::CATCHRET) { 2624 const Function &F = MBB.getParent()->getFunction(); 2625 bool IsSEH = isAsynchronousEHPersonality( 2626 classifyEHPersonality(F.getPersonalityFn())); 2627 if (IsSEH) 2628 return true; 2629 } 2630 } 2631 2632 DebugLoc DL = MBB.findDebugLoc(MI); 2633 2634 // Reload XMMs from stack frame. 2635 for (unsigned i = 0, e = CSI.size(); i != e; ++i) { 2636 unsigned Reg = CSI[i].getReg(); 2637 if (X86::GR64RegClass.contains(Reg) || 2638 X86::GR32RegClass.contains(Reg)) 2639 continue; 2640 2641 // If this is k-register make sure we lookup via the largest legal type. 2642 MVT VT = MVT::Other; 2643 if (X86::VK16RegClass.contains(Reg)) 2644 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; 2645 2646 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2647 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI); 2648 } 2649 2650 // POP GPRs. 2651 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r; 2652 for (unsigned i = 0, e = CSI.size(); i != e; ++i) { 2653 unsigned Reg = CSI[i].getReg(); 2654 if (!X86::GR64RegClass.contains(Reg) && 2655 !X86::GR32RegClass.contains(Reg)) 2656 continue; 2657 2658 BuildMI(MBB, MI, DL, TII.get(Opc), Reg) 2659 .setMIFlag(MachineInstr::FrameDestroy); 2660 } 2661 return true; 2662 } 2663 2664 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF, 2665 BitVector &SavedRegs, 2666 RegScavenger *RS) const { 2667 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); 2668 2669 // Spill the BasePtr if it's used. 2670 if (TRI->hasBasePointer(MF)){ 2671 Register BasePtr = TRI->getBaseRegister(); 2672 if (STI.isTarget64BitILP32()) 2673 BasePtr = getX86SubSuperRegister(BasePtr, 64); 2674 SavedRegs.set(BasePtr); 2675 } 2676 } 2677 2678 static bool 2679 HasNestArgument(const MachineFunction *MF) { 2680 const Function &F = MF->getFunction(); 2681 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); 2682 I != E; I++) { 2683 if (I->hasNestAttr() && !I->use_empty()) 2684 return true; 2685 } 2686 return false; 2687 } 2688 2689 /// GetScratchRegister - Get a temp register for performing work in the 2690 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform 2691 /// and the properties of the function either one or two registers will be 2692 /// needed. Set primary to true for the first register, false for the second. 
2693 static unsigned
2694 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
2695   CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
2696
2697   // Erlang stuff.
2698   if (CallingConvention == CallingConv::HiPE) {
2699     if (Is64Bit)
2700       return Primary ? X86::R14 : X86::R13;
2701     else
2702       return Primary ? X86::EBX : X86::EDI;
2703   }
2704
2705   if (Is64Bit) {
2706     if (IsLP64)
2707       return Primary ? X86::R11 : X86::R12;
2708     else
2709       return Primary ? X86::R11D : X86::R12D;
2710   }
2711
2712   bool IsNested = HasNestArgument(&MF);
2713
2714   if (CallingConvention == CallingConv::X86_FastCall ||
2715       CallingConvention == CallingConv::Fast ||
2716       CallingConvention == CallingConv::Tail) {
2717     if (IsNested)
2718       report_fatal_error("Segmented stacks do not support fastcall with "
2719                          "nested functions.");
2720     return Primary ? X86::EAX : X86::ECX;
2721   }
2722   if (IsNested)
2723     return Primary ? X86::EDX : X86::EAX;
2724   return Primary ? X86::ECX : X86::EAX;
2725 }
2726
2727 // The stack limit in the TCB is set to this many bytes above the actual stack
2728 // limit.
2729 static const uint64_t kSplitStackAvailable = 256;
2730
2731 void X86FrameLowering::adjustForSegmentedStacks(
2732     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2733   MachineFrameInfo &MFI = MF.getFrameInfo();
2734   uint64_t StackSize;
2735   unsigned TlsReg, TlsOffset;
2736   DebugLoc DL;
2737
2738   // To support shrink-wrapping we would need to insert the new blocks
2739   // at the right place and update the branches to PrologueMBB.
2740   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2741
2742   unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2743   assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2744          "Scratch register is live-in");
2745
2746   if (MF.getFunction().isVarArg())
2747     report_fatal_error("Segmented stacks do not support vararg functions.");
2748   if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
2749       !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
2750       !STI.isTargetDragonFly())
2751     report_fatal_error("Segmented stacks not supported on this platform.");
2752
2753   // Eventually StackSize will be calculated by a link-time pass, which will
2754   // also decide whether checking code needs to be injected into this particular
2755   // prologue.
2756   StackSize = MFI.getStackSize();
2757
2758   // Do not generate a prologue for leaf functions with a stack of size zero.
2759   // For non-leaf functions we have to allow for the possibility that the
2760   // call is to a non-split function, as in PR37807. This function could also
2761   // take the address of a non-split function. When the linker tries to adjust
2762   // its non-existent prologue, it would fail with an error. Mark the object
2763   // file so that such failures are not errors. See this Go language bug-report
2764   // https://go-review.googlesource.com/c/go/+/148819/
2765   if (StackSize == 0 && !MFI.hasTailCall()) {
2766     MF.getMMI().setHasNosplitStack(true);
2767     return;
2768   }
2769
2770   MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
2771   MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
2772   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2773   bool IsNested = false;
2774
2775   // We need to know if the function has a nest argument only in 64 bit mode.
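  // (On x86-64 the 'nest' parameter, i.e. the static chain, is passed in R10,
  // which is also the register __morestack expects the frame size in below;
  // that overlap is why nested functions need the extra save/restore dance.)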
2776 if (Is64Bit) 2777 IsNested = HasNestArgument(&MF); 2778 2779 // The MOV R10, RAX needs to be in a different block, since the RET we emit in 2780 // allocMBB needs to be last (terminating) instruction. 2781 2782 for (const auto &LI : PrologueMBB.liveins()) { 2783 allocMBB->addLiveIn(LI); 2784 checkMBB->addLiveIn(LI); 2785 } 2786 2787 if (IsNested) 2788 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D); 2789 2790 MF.push_front(allocMBB); 2791 MF.push_front(checkMBB); 2792 2793 // When the frame size is less than 256 we just compare the stack 2794 // boundary directly to the value of the stack pointer, per gcc. 2795 bool CompareStackPointer = StackSize < kSplitStackAvailable; 2796 2797 // Read the limit off the current stacklet off the stack_guard location. 2798 if (Is64Bit) { 2799 if (STI.isTargetLinux()) { 2800 TlsReg = X86::FS; 2801 TlsOffset = IsLP64 ? 0x70 : 0x40; 2802 } else if (STI.isTargetDarwin()) { 2803 TlsReg = X86::GS; 2804 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90. 2805 } else if (STI.isTargetWin64()) { 2806 TlsReg = X86::GS; 2807 TlsOffset = 0x28; // pvArbitrary, reserved for application use 2808 } else if (STI.isTargetFreeBSD()) { 2809 TlsReg = X86::FS; 2810 TlsOffset = 0x18; 2811 } else if (STI.isTargetDragonFly()) { 2812 TlsReg = X86::FS; 2813 TlsOffset = 0x20; // use tls_tcb.tcb_segstack 2814 } else { 2815 report_fatal_error("Segmented stacks not supported on this platform."); 2816 } 2817 2818 if (CompareStackPointer) 2819 ScratchReg = IsLP64 ? X86::RSP : X86::ESP; 2820 else 2821 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP) 2822 .addImm(1).addReg(0).addImm(-StackSize).addReg(0); 2823 2824 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg) 2825 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg); 2826 } else { 2827 if (STI.isTargetLinux()) { 2828 TlsReg = X86::GS; 2829 TlsOffset = 0x30; 2830 } else if (STI.isTargetDarwin()) { 2831 TlsReg = X86::GS; 2832 TlsOffset = 0x48 + 90*4; 2833 } else if (STI.isTargetWin32()) { 2834 TlsReg = X86::FS; 2835 TlsOffset = 0x14; // pvArbitrary, reserved for application use 2836 } else if (STI.isTargetDragonFly()) { 2837 TlsReg = X86::FS; 2838 TlsOffset = 0x10; // use tls_tcb.tcb_segstack 2839 } else if (STI.isTargetFreeBSD()) { 2840 report_fatal_error("Segmented stacks not supported on FreeBSD i386."); 2841 } else { 2842 report_fatal_error("Segmented stacks not supported on this platform."); 2843 } 2844 2845 if (CompareStackPointer) 2846 ScratchReg = X86::ESP; 2847 else 2848 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP) 2849 .addImm(1).addReg(0).addImm(-StackSize).addReg(0); 2850 2851 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() || 2852 STI.isTargetDragonFly()) { 2853 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg) 2854 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg); 2855 } else if (STI.isTargetDarwin()) { 2856 2857 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register. 2858 unsigned ScratchReg2; 2859 bool SaveScratch2; 2860 if (CompareStackPointer) { 2861 // The primary scratch register is available for holding the TLS offset. 
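        // (In this case ScratchReg was set to ESP above rather than being
        // materialized with an LEA, so the primary scratch register is free.)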
2862 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true); 2863 SaveScratch2 = false; 2864 } else { 2865 // Need to use a second register to hold the TLS offset 2866 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false); 2867 2868 // Unfortunately, with fastcc the second scratch register may hold an 2869 // argument. 2870 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2); 2871 } 2872 2873 // If Scratch2 is live-in then it needs to be saved. 2874 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) && 2875 "Scratch register is live-in and not saved"); 2876 2877 if (SaveScratch2) 2878 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r)) 2879 .addReg(ScratchReg2, RegState::Kill); 2880 2881 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2) 2882 .addImm(TlsOffset); 2883 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)) 2884 .addReg(ScratchReg) 2885 .addReg(ScratchReg2).addImm(1).addReg(0) 2886 .addImm(0) 2887 .addReg(TlsReg); 2888 2889 if (SaveScratch2) 2890 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2); 2891 } 2892 } 2893 2894 // This jump is taken if SP >= (Stacklet Limit + Stack Space required). 2895 // It jumps to normal execution of the function body. 2896 BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A); 2897 2898 // On 32 bit we first push the arguments size and then the frame size. On 64 2899 // bit, we pass the stack frame size in r10 and the argument size in r11. 2900 if (Is64Bit) { 2901 // Functions with nested arguments use R10, so it needs to be saved across 2902 // the call to _morestack 2903 2904 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX; 2905 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D; 2906 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D; 2907 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr; 2908 const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri; 2909 2910 if (IsNested) 2911 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10); 2912 2913 BuildMI(allocMBB, DL, TII.get(MOVri), Reg10) 2914 .addImm(StackSize); 2915 BuildMI(allocMBB, DL, TII.get(MOVri), Reg11) 2916 .addImm(X86FI->getArgumentStackSize()); 2917 } else { 2918 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32)) 2919 .addImm(X86FI->getArgumentStackSize()); 2920 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32)) 2921 .addImm(StackSize); 2922 } 2923 2924 // __morestack is in libgcc 2925 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) { 2926 // Under the large code model, we cannot assume that __morestack lives 2927 // within 2^31 bytes of the call site, so we cannot use pc-relative 2928 // addressing. We cannot perform the call via a temporary register, 2929 // as the rax register may be used to store the static chain, and all 2930 // other suitable registers may be either callee-save or used for 2931 // parameter passing. We cannot use the stack at this point either 2932 // because __morestack manipulates the stack directly. 2933 // 2934 // To avoid these issues, perform an indirect call via a read-only memory 2935 // location containing the address. 2936 // 2937 // This solution is not perfect, as it assumes that the .rodata section 2938 // is laid out within 2^31 bytes of each function body, but this seems 2939 // to be sufficient for JIT. 2940 // FIXME: Add retpoline support and remove the error here.. 
2941 if (STI.useIndirectThunkCalls()) 2942 report_fatal_error("Emitting morestack calls on 64-bit with the large " 2943 "code model and thunks not yet implemented."); 2944 BuildMI(allocMBB, DL, TII.get(X86::CALL64m)) 2945 .addReg(X86::RIP) 2946 .addImm(0) 2947 .addReg(0) 2948 .addExternalSymbol("__morestack_addr") 2949 .addReg(0); 2950 MF.getMMI().setUsesMorestackAddr(true); 2951 } else { 2952 if (Is64Bit) 2953 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32)) 2954 .addExternalSymbol("__morestack"); 2955 else 2956 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32)) 2957 .addExternalSymbol("__morestack"); 2958 } 2959 2960 if (IsNested) 2961 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10)); 2962 else 2963 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET)); 2964 2965 allocMBB->addSuccessor(&PrologueMBB); 2966 2967 checkMBB->addSuccessor(allocMBB, BranchProbability::getZero()); 2968 checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne()); 2969 2970 #ifdef EXPENSIVE_CHECKS 2971 MF.verify(); 2972 #endif 2973 } 2974 2975 /// Lookup an ERTS parameter in the !hipe.literals named metadata node. 2976 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets 2977 /// to fields it needs, through a named metadata node "hipe.literals" containing 2978 /// name-value pairs. 2979 static unsigned getHiPELiteral( 2980 NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) { 2981 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) { 2982 MDNode *Node = HiPELiteralsMD->getOperand(i); 2983 if (Node->getNumOperands() != 2) continue; 2984 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0)); 2985 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1)); 2986 if (!NodeName || !NodeVal) continue; 2987 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue()); 2988 if (ValConst && NodeName->getString() == LiteralName) { 2989 return ValConst->getZExtValue(); 2990 } 2991 } 2992 2993 report_fatal_error("HiPE literal " + LiteralName 2994 + " required but not provided"); 2995 } 2996 2997 // Return true if there are no non-ehpad successors to MBB and there are no 2998 // non-meta instructions between MBBI and MBB.end(). 2999 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, 3000 MachineBasicBlock::const_iterator MBBI) { 3001 return llvm::all_of( 3002 MBB.successors(), 3003 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) && 3004 std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) { 3005 return MI.isMetaInstruction(); 3006 }); 3007 } 3008 3009 /// Erlang programs may need a special prologue to handle the stack size they 3010 /// might need at runtime. That is because Erlang/OTP does not implement a C 3011 /// stack but uses a custom implementation of hybrid stack/heap architecture. 3012 /// (for more information see Eric Stenman's Ph.D. thesis: 3013 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf) 3014 /// 3015 /// CheckStack: 3016 /// temp0 = sp - MaxStack 3017 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart 3018 /// OldStart: 3019 /// ... 
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  DebugLoc DL;

  // To support shrink-wrapping we would need to insert the new blocks
  // at the right place and update the branches to PrologueMBB.
  assert(&(*MF.begin()) == &PrologueMBB &&
         "Shrink-wrapping not supported yet");

  // HiPE-specific values.
  NamedMDNode *HiPELiteralsMD =
      MF.getMMI().getModule()->getNamedMetadata("hipe.literals");
  if (!HiPELiteralsMD)
    report_fatal_error(
        "Can't generate HiPE prologue without runtime parameters");
  const unsigned HipeLeafWords = getHiPELiteral(
      HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs
                                ? MF.getFunction().arg_size() - CCRegisteredArgs
                                : 0;
  unsigned MaxStack = MFI.getStackSize() + CallerStkArity * SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI.hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        if (!MI.isCall())
          continue;

        // Get the callee operand.
        const MachineOperand &MO = MI.getOperand(0);

        // Only take into account global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs
                                      ? F->arg_size() - CCRegisteredArgs
                                      : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls =
              std::max(MoreStackForCalls,
                       (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed amount, then
  // runtime checks and calls to the "inc_stack_0" BIF should be inserted in
  // the assembly prologue.
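  //
  // Illustratively (with a made-up literal value): if AMD64_LEAF_WORDS were
  // 24, Guaranteed would be 24 * 8 = 192 bytes on x86-64, so a function whose
  // MaxStack came to 256 bytes would get the check/inc-stack blocks below,
  // while one needing 128 bytes would not.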
  if (MaxStack > Guaranteed) {
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (const auto &LI : PrologueMBB.liveins()) {
      stackCheckMBB->addLiveIn(LI);
      incStackMBB->addLiveIn(LI);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
    } else {
      SPReg = X86::ESP;
      PReg = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
    }

    ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create a new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed to by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
                 PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))
        .addMBB(&PrologueMBB)
        .addImm(X86::COND_AE);

    // Create a new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop))
        .addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
                 PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))
        .addMBB(incStackMBB)
        .addImm(X86::COND_LE);

    stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
    stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
    incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
    incStackMBB->addSuccessor(incStackMBB, {1, 100});
  }
#ifdef EXPENSIVE_CHECKS
  MF.verify();
#endif
}

bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const DebugLoc &DL,
                                           int Offset) const {
  if (Offset <= 0)
    return false;

  if (Offset % SlotSize)
    return false;

  int NumPops = Offset / SlotSize;
  // This is only worth it if we have at most 2 pops.
  if (NumPops != 1 && NumPops != 2)
    return false;

  // Handle only the trivial case where the adjustment directly follows
  // a call. This is the most common one, anyway.
  if (MBBI == MBB.begin())
    return false;
  MachineBasicBlock::iterator Prev = std::prev(MBBI);
  if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
    return false;

  unsigned Regs[2];
  unsigned FoundRegs = 0;

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const MachineOperand &RegMask = Prev->getOperand(1);

  auto &RegClass =
      Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
  // Try to find up to NumPops free registers.
  for (auto Candidate : RegClass) {
    // Poor man's liveness:
    // Since we're immediately after a call, any register that is clobbered
    // by the call and not defined by it can be considered dead.
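    //
    // For instance, after "call foo" on x86-64, %rcx is typically clobbered
    // and not defined, so "pop %rcx" (1 byte) can replace "add $8, %rsp"
    // (4 bytes) to bump SP by one slot; that size win is the point of this
    // minsize-only transformation.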
    if (!RegMask.clobbersPhysReg(Candidate))
      continue;

    // Don't clobber reserved registers.
    if (MRI.isReserved(Candidate))
      continue;

    bool IsDef = false;
    for (const MachineOperand &MO : Prev->implicit_operands()) {
      if (MO.isReg() && MO.isDef() &&
          TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
        IsDef = true;
        break;
      }
    }

    if (IsDef)
      continue;

    Regs[FoundRegs++] = Candidate;
    if (FoundRegs == (unsigned)NumPops)
      break;
  }

  if (FoundRegs == 0)
    return false;

  // If we found only one free register, but need two, reuse the same one
  // twice.
  while (FoundRegs < (unsigned)NumPops)
    Regs[FoundRegs++] = Regs[0];

  for (int i = 0; i < NumPops; ++i)
    BuildMI(MBB, MBBI, DL, TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r),
            Regs[i]);

  return true;
}

MachineBasicBlock::iterator X86FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  bool reserveCallFrame = hasReservedCallFrame(MF);
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc(); // Copy the DebugLoc as I will be erased.
  uint64_t Amount = TII.getFrameSize(*I);
  uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
  I = MBB.erase(I);
  auto InsertPos = skipDebugInstructionsForward(I, MBB.end());

  // Try to avoid emitting dead SP adjustments if the block end is unreachable,
  // typically because the function is marked noreturn (abort, throw,
  // assert_fail, etc).
  if (isDestroy && blockEndIsUnreachable(MBB, I))
    return I;

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    Amount = alignTo(Amount, getStackAlign());

    const Function &F = MF.getFunction();
    bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();

    // If we have any exception handlers in this function, and we adjust
    // the SP before calls, we may need to indicate this to the unwinder
    // using GNU_ARGS_SIZE. Note that this may be necessary even when
    // Amount == 0, because the preceding function may have set a non-zero
    // GNU_ARGS_SIZE.
    // TODO: We don't need to reset this between subsequent functions,
    // if it didn't change.
    bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();

    if (HasDwarfEHHandlers && !isDestroy &&
        MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
      BuildCFI(MBB, InsertPos, DL,
               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));

    if (Amount == 0)
      return I;

    // Factor out the amount that gets handled inside the sequence
    // (pushes of arguments for frame setup, callee pops for frame destroy).
    Amount -= InternalAmt;

    // TODO: This is needed only if we require precise CFA.
    // If this is a callee-pop calling convention, emit a CFA adjust for
    // the amount the callee popped.
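    //
    // For example, a 32-bit stdcall callee taking two ints returns with
    // "ret $8": InternalAmt is then 8, and the CFA effectively moves by -8
    // with no SP-adjusting instruction emitted in this function.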
    if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
      BuildCFI(MBB, InsertPos, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));

    // Add Amount to SP to destroy a frame, or subtract it to set one up.
    int64_t StackAdjustment = isDestroy ? Amount : -Amount;

    if (StackAdjustment) {
      // Merge with any previous or following adjustment instruction. Note:
      // the instructions merged here do not have CFI, so their stack
      // adjustments do not feed into CfaAdjustment.
      StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
      StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);

      if (StackAdjustment) {
        if (!(F.hasMinSize() &&
              adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
          BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
                               /*InEpilogue=*/false);
      }
    }

    if (DwarfCFI && !hasFP(MF)) {
      // If we don't have a FP, but need to generate unwind information,
      // we need to set the correct CFA offset after the stack adjustment.
      // How much we adjust the CFA offset depends on whether we're emitting
      // CFI only for EH purposes or for debugging. EH only requires the CFA
      // offset to be correct at each call site, while for debugging we want
      // it to be more precise.

      int64_t CfaAdjustment = -StackAdjustment;
      // TODO: When not using precise CFA, we also need to adjust for the
      // InternalAmt here.
      if (CfaAdjustment) {
        BuildCFI(MBB, InsertPos, DL,
                 MCCFIInstruction::createAdjustCfaOffset(nullptr,
                                                         CfaAdjustment));
      }
    }

    return I;
  }

  if (InternalAmt) {
    MachineBasicBlock::iterator CI = I;
    MachineBasicBlock::iterator B = MBB.begin();
    while (CI != B && !std::prev(CI)->isCall())
      --CI;
    BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
  }

  return I;
}

bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");
  const MachineFunction &MF = *MBB.getParent();
  if (!MBB.isLiveIn(X86::EFLAGS))
    return true;

  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext();
}

bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");

  // Win64 has strict requirements on epilogues, and we are not taking any
  // chances messing with them.
  // I.e., unless this block is already an exit block, we can't use it as an
  // epilogue.
  if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
    return false;

  // The Swift async context epilogue has a BTR instruction that clobbers
  // parts of EFLAGS.
  const MachineFunction &MF = *MBB.getParent();
  if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext())
    return !flagsNeedToBePreservedBeforeTheTerminators(MBB);

  if (canUseLEAForSPInEpilogue(*MBB.getParent()))
    return true;

  // If we cannot use LEA to adjust SP, we may need to use ADD, which
  // clobbers EFLAGS. Check that we do not need to preserve it; otherwise,
  // conservatively assume it is not safe to insert the epilogue here.
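  //
  // A typical rejected block ends in a JCC whose flags were produced
  // earlier: inserting "add $N, %rsp" ahead of that JCC would corrupt its
  // condition.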
  return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
}

bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
  // If we may need to emit frameless compact unwind information, give
  // up as this is currently broken: PR25614.
  bool CompactUnwind =
      MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() !=
      nullptr;
  return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) ||
          !CompactUnwind) &&
         // The lowering of segmented stacks and HiPE only supports entry
         // blocks as prologue blocks: PR26107. This limitation may be
         // lifted if we fix:
         // - adjustForSegmentedStacks
         // - adjustForHiPEPrologue
         MF.getFunction().getCallingConv() != CallingConv::HiPE &&
         !MF.shouldSplitStack();
}

MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool RestoreSP) const {
  assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
  assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
  assert(STI.is32Bit() && !Uses64BitFramePtr &&
         "restoring EBP/ESI on non-32-bit target");

  MachineFunction &MF = *MBB.getParent();
  Register FramePtr = TRI->getFrameRegister(MF);
  Register BasePtr = TRI->getBaseRegister();
  WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // FIXME: Don't set the FrameSetup flag in the catchret case.

  int FI = FuncInfo.EHRegNodeFrameIndex;
  int EHRegSize = MFI.getObjectSize(FI);

  if (RestoreSP) {
    // MOV32rm -EHRegSize(%ebp), %esp
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
                 X86::EBP, true, -EHRegSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  Register UsedReg;
  int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed();
  int EndOffset = -EHRegOffset - EHRegSize;
  FuncInfo.EHRegNodeEndOffset = EndOffset;

  if (UsedReg == FramePtr) {
    // ADD $offset, %ebp
    unsigned ADDri = getADDriOpcode(false, EndOffset);
    BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
        .addReg(FramePtr)
        .addImm(EndOffset)
        .setMIFlag(MachineInstr::FrameSetup)
        ->getOperand(3)
        .setIsDead();
    assert(EndOffset >= 0 &&
           "end of registration object above normal EBP position!");
  } else if (UsedReg == BasePtr) {
    // LEA offset(%ebp), %esi
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
                 FramePtr, false, EndOffset)
        .setMIFlag(MachineInstr::FrameSetup);
    // MOV32rm SavedEBPOffset(%esi), %ebp
    assert(X86FI->getHasSEHFramePtrSave());
    int Offset =
        getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
            .getFixed();
    assert(UsedReg == BasePtr);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
                 UsedReg, true, Offset)
        .setMIFlag(MachineInstr::FrameSetup);
  } else {
    llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
  }
  return MBBI;
}

int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
  return TRI->getSlotSize();
}

Register
X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
  return TRI->getDwarfRegNum(StackPtr, true);
}

namespace {
// Struct used by orderFrameObjects to help sort the stack objects.
struct X86FrameSortingObject {
  bool IsValid = false;             // true if we care about this Object.
  unsigned ObjectIndex = 0;         // Index of Object into MFI list.
  unsigned ObjectSize = 0;          // Size of Object in bytes.
  Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
  unsigned ObjectNumUses = 0;       // Object static number of uses.
};

// The comparison function we use for stable_sort to order our local
// stack symbols. The current algorithm is to use an estimated
// "density". This takes into consideration the size and number of
// uses each object has in order to roughly minimize code size.
// So, for example, an object of size 16B that is referenced 5 times
// will get higher priority than 4 4B objects referenced 1 time each.
// It's not perfect and we may be able to squeeze a few more bytes out of
// it (for example: 0(esp) requires fewer bytes, symbols allocated at the
// fringe end can have special consideration, given that their size is less
// important, etc.), but the algorithmic complexity grows too much to be
// worth the extra gains we get. This gets us pretty close.
// The final order leaves us with objects with highest priority going
// at the end of our list.
struct X86FrameSortingComparator {
  inline bool operator()(const X86FrameSortingObject &A,
                         const X86FrameSortingObject &B) const {
    uint64_t DensityAScaled, DensityBScaled;

    // For consistency in our comparison, all invalid objects are placed
    // at the end. This also allows us to stop walking when we hit the
    // first invalid item after it's all sorted.
    if (!A.IsValid)
      return false;
    if (!B.IsValid)
      return true;

    // The density is calculated by doing:
    //   (double)DensityA = A.ObjectNumUses / A.ObjectSize
    //   (double)DensityB = B.ObjectNumUses / B.ObjectSize
    // Since this approach may cause inconsistencies in
    // the floating point <, >, == comparisons, depending on the floating
    // point model with which the compiler was built, we're going
    // to scale both sides by multiplying with
    // A.ObjectSize * B.ObjectSize. This ends up factoring away
    // the division and, with it, the need for any floating point
    // arithmetic.
    DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
                     static_cast<uint64_t>(B.ObjectSize);
    DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
                     static_cast<uint64_t>(A.ObjectSize);

    // If the two densities are equal, prioritize highest alignment
    // objects. This allows for similar alignment objects
    // to be packed together (given the same density).
    // There's room for improvement here, also, since we can pack
    // similar alignment (different density) objects next to each
    // other to save padding. This will also require further
    // complexity/iterations, and the overall gain isn't worth it,
    // in general. Something to keep in mind, though.
    if (DensityAScaled == DensityBScaled)
      return A.ObjectAlignment < B.ObjectAlignment;

    return DensityAScaled < DensityBScaled;
  }
};
} // namespace

// Order the symbols in the local stack.
// We want to place the local stack objects in some sort of sensible order.
// The heuristic we use is to try and pack them according to static number
// of uses and size of object in order to minimize code size.
void X86FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Don't waste time if there's nothing to do.
  if (ObjectsToAllocate.empty())
    return;

  // Create an array of all MFI objects. We won't need all of these
  // objects, but we're going to create a full array of them to make
  // it easier to index into when we're counting "uses" down below.
  // We want to be able to easily/cheaply access an object by simply
  // indexing into it, instead of having to search for it every time.
  std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());

  // Walk the objects we care about and mark them as such in our working
  // struct.
  for (auto &Obj : ObjectsToAllocate) {
    SortingObjects[Obj].IsValid = true;
    SortingObjects[Obj].ObjectIndex = Obj;
    SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
    // Set the size.
    int ObjectSize = MFI.getObjectSize(Obj);
    if (ObjectSize == 0)
      // Variable size. Just use 4.
      SortingObjects[Obj].ObjectSize = 4;
    else
      SortingObjects[Obj].ObjectSize = ObjectSize;
  }

  // Count the number of uses for each object.
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      for (const MachineOperand &MO : MI.operands()) {
        // Check to see if it's a local stack symbol.
        if (!MO.isFI())
          continue;
        int Index = MO.getIndex();
        // Check to see if it falls within our range, and is tagged
        // to require ordering.
        if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
            SortingObjects[Index].IsValid)
          SortingObjects[Index].ObjectNumUses++;
      }
    }
  }

  // Sort the objects using X86FrameSortingComparator (see its comment for
  // more info).
  llvm::stable_sort(SortingObjects, X86FrameSortingComparator());

  // Now modify the original list to represent the final order that
  // we want. The order will depend on whether we're going to access them
  // from the stack pointer or the frame pointer. For SP, the list should
  // end with the objects we want at smaller offsets. For FP, it should be
  // flipped.
  int i = 0;
  for (auto &Obj : SortingObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  // Flip it if we're accessing off of the FP.
  if (!TRI->hasStackRealignment(MF) && hasFP(MF))
    std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
}

unsigned
X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
  // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
  unsigned Offset = 16;
  // RBP is immediately pushed.
  Offset += SlotSize;
  // All callee-saved registers are then pushed.
  Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
  // Every funclet allocates enough stack space for the largest outgoing call.
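  // (Illustratively, for a hypothetical function with 16 bytes of
  // callee-saved registers and a 32-byte funclet frame, this works out to
  // 16 + 8 + 16 + 32 = 72.)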
  Offset += getWinEHFuncletFrameSize(MF);
  return Offset;
}

void X86FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // Mark the function as not having WinCFI. We will set it back to true in
  // emitPrologue if it gets called and emits CFI.
  MF.setHasWinCFI(false);

  // If we are using Windows x64 CFI, ensure that the stack is always 8 byte
  // aligned. The format doesn't support misaligned stack adjustments.
  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
    MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize));

  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  if (STI.is64Bit() && MF.hasEHFunclets() &&
      classifyEHPersonality(MF.getFunction().getPersonalityFn()) ==
          EHPersonality::MSVC_CXX) {
    adjustFrameForMsvcCxxEh(MF);
  }
}

void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
  // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
  // relative to RSP after the prologue. Find the offset of the last fixed
  // object, so that we can allocate a slot immediately following it. If there
  // were no fixed objects, use offset -SlotSize, which is immediately after
  // the return address. Fixed objects have negative frame indices.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
  int64_t MinFixedObjOffset = -SlotSize;
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
    MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));

  for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
    for (WinEHHandlerType &H : TBME.HandlerArray) {
      int FrameIndex = H.CatchObj.FrameIndex;
      if (FrameIndex != INT_MAX) {
        // Ensure alignment.
        unsigned Align = MFI.getObjectAlign(FrameIndex).value();
        MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
        MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
        MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
      }
    }
  }

  // Ensure alignment.
  MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
  int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
  int UnwindHelpFI =
      MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  // Store -2 into UnwindHelp on function entry. We have to scan forwards past
  // other frame setup instructions.
  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  DebugLoc DL = MBB.findDebugLoc(MBBI);
  addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
                    UnwindHelpFI)
      .addImm(-2);
}

void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  if (STI.is32Bit() && MF.hasEHFunclets())
    restoreWinEHStackPointersInParent(MF);
}

void X86FrameLowering::restoreWinEHStackPointersInParent(
    MachineFunction &MF) const {
  // 32-bit functions have to restore stack pointers when control is
  // transferred back to the parent function. These blocks are identified as
  // eh pads that are not funclet entries.
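  // Under asynchronous (SEH) personalities the parent's ESP is restored as
  // well (RestoreSP below); under C++ EH only EBP/ESI need restoring.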
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));
  for (MachineBasicBlock &MBB : MF) {
    bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
    if (NeedsRestore)
      restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
                                  /*RestoreSP=*/IsSEH);
  }
}