1 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the X86 implementation of TargetFrameLowering class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "X86FrameLowering.h" 14 #include "MCTargetDesc/X86MCTargetDesc.h" 15 #include "X86InstrBuilder.h" 16 #include "X86InstrInfo.h" 17 #include "X86MachineFunctionInfo.h" 18 #include "X86Subtarget.h" 19 #include "X86TargetMachine.h" 20 #include "llvm/ADT/SmallSet.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/Analysis/EHPersonalities.h" 23 #include "llvm/CodeGen/MachineFrameInfo.h" 24 #include "llvm/CodeGen/MachineFunction.h" 25 #include "llvm/CodeGen/MachineInstrBuilder.h" 26 #include "llvm/CodeGen/MachineModuleInfo.h" 27 #include "llvm/CodeGen/MachineRegisterInfo.h" 28 #include "llvm/CodeGen/WinEHFuncInfo.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Function.h" 31 #include "llvm/MC/MCAsmInfo.h" 32 #include "llvm/MC/MCObjectFileInfo.h" 33 #include "llvm/MC/MCSymbol.h" 34 #include "llvm/Support/Debug.h" 35 #include "llvm/Target/TargetOptions.h" 36 #include <cstdlib> 37 38 #define DEBUG_TYPE "x86-fl" 39 40 STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue"); 41 STATISTIC(NumFrameExtraProbe, 42 "Number of extra stack probes generated in prologue"); 43 44 using namespace llvm; 45 46 X86FrameLowering::X86FrameLowering(const X86Subtarget &STI, 47 MaybeAlign StackAlignOverride) 48 : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(), 49 STI.is64Bit() ? -8 : -4), 50 STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) { 51 // Cache a bunch of frame-related predicates for this subtarget. 52 SlotSize = TRI->getSlotSize(); 53 Is64Bit = STI.is64Bit(); 54 IsLP64 = STI.isTarget64BitLP64(); 55 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit. 56 Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64(); 57 StackPtr = TRI->getStackRegister(); 58 } 59 60 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { 61 return !MF.getFrameInfo().hasVarSizedObjects() && 62 !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() && 63 !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall(); 64 } 65 66 /// canSimplifyCallFramePseudos - If there is a reserved call frame, the 67 /// call frame pseudos can be simplified. Having a FP, as in the default 68 /// implementation, is not sufficient here since we can't always use it. 69 /// Use a more nuanced condition. 70 bool 71 X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const { 72 return hasReservedCallFrame(MF) || 73 MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() || 74 (hasFP(MF) && !TRI->hasStackRealignment(MF)) || 75 TRI->hasBasePointer(MF); 76 } 77 78 // needsFrameIndexResolution - Do we need to perform FI resolution for 79 // this function. Normally, this is required only when the function 80 // has any stack objects. However, FI resolution actually has another job, 81 // not apparent from the title - it resolves callframesetup/destroy 82 // that were not simplified earlier. 83 // So, this is required for x86 functions that have push sequences even 84 // when there are no stack objects. 85 bool 86 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const { 87 return MF.getFrameInfo().hasStackObjects() || 88 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences(); 89 } 90 91 /// hasFP - Return true if the specified function should have a dedicated frame 92 /// pointer register. This is true if the function has variable sized allocas 93 /// or if frame pointer elimination is disabled. 94 bool X86FrameLowering::hasFP(const MachineFunction &MF) const { 95 const MachineFrameInfo &MFI = MF.getFrameInfo(); 96 return (MF.getTarget().Options.DisableFramePointerElim(MF) || 97 TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() || 98 MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() || 99 MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() || 100 MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() || 101 MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() || 102 MFI.hasStackMap() || MFI.hasPatchPoint() || 103 (isWin64Prologue(MF) && MFI.hasCopyImplyingStackAdjustment())); 104 } 105 106 static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) { 107 if (IsLP64) { 108 if (isInt<8>(Imm)) 109 return X86::SUB64ri8; 110 return X86::SUB64ri32; 111 } else { 112 if (isInt<8>(Imm)) 113 return X86::SUB32ri8; 114 return X86::SUB32ri; 115 } 116 } 117 118 static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) { 119 if (IsLP64) { 120 if (isInt<8>(Imm)) 121 return X86::ADD64ri8; 122 return X86::ADD64ri32; 123 } else { 124 if (isInt<8>(Imm)) 125 return X86::ADD32ri8; 126 return X86::ADD32ri; 127 } 128 } 129 130 static unsigned getSUBrrOpcode(bool IsLP64) { 131 return IsLP64 ? X86::SUB64rr : X86::SUB32rr; 132 } 133 134 static unsigned getADDrrOpcode(bool IsLP64) { 135 return IsLP64 ? X86::ADD64rr : X86::ADD32rr; 136 } 137 138 static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) { 139 if (IsLP64) { 140 if (isInt<8>(Imm)) 141 return X86::AND64ri8; 142 return X86::AND64ri32; 143 } 144 if (isInt<8>(Imm)) 145 return X86::AND32ri8; 146 return X86::AND32ri; 147 } 148 149 static unsigned getLEArOpcode(bool IsLP64) { 150 return IsLP64 ? X86::LEA64r : X86::LEA32r; 151 } 152 153 static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm) { 154 if (Use64BitReg) { 155 if (isUInt<32>(Imm)) 156 return X86::MOV32ri64; 157 if (isInt<32>(Imm)) 158 return X86::MOV64ri32; 159 return X86::MOV64ri; 160 } 161 return X86::MOV32ri; 162 } 163 164 static bool isEAXLiveIn(MachineBasicBlock &MBB) { 165 for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) { 166 unsigned Reg = RegMask.PhysReg; 167 168 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX || 169 Reg == X86::AH || Reg == X86::AL) 170 return true; 171 } 172 173 return false; 174 } 175 176 /// Check if the flags need to be preserved before the terminators. 177 /// This would be the case, if the eflags is live-in of the region 178 /// composed by the terminators or live-out of that region, without 179 /// being defined by a terminator. 180 static bool 181 flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) { 182 for (const MachineInstr &MI : MBB.terminators()) { 183 bool BreakNext = false; 184 for (const MachineOperand &MO : MI.operands()) { 185 if (!MO.isReg()) 186 continue; 187 Register Reg = MO.getReg(); 188 if (Reg != X86::EFLAGS) 189 continue; 190 191 // This terminator needs an eflags that is not defined 192 // by a previous another terminator: 193 // EFLAGS is live-in of the region composed by the terminators. 194 if (!MO.isDef()) 195 return true; 196 // This terminator defines the eflags, i.e., we don't need to preserve it. 197 // However, we still need to check this specific terminator does not 198 // read a live-in value. 199 BreakNext = true; 200 } 201 // We found a definition of the eflags, no need to preserve them. 202 if (BreakNext) 203 return false; 204 } 205 206 // None of the terminators use or define the eflags. 207 // Check if they are live-out, that would imply we need to preserve them. 208 for (const MachineBasicBlock *Succ : MBB.successors()) 209 if (Succ->isLiveIn(X86::EFLAGS)) 210 return true; 211 212 return false; 213 } 214 215 /// emitSPUpdate - Emit a series of instructions to increment / decrement the 216 /// stack pointer by a constant value. 217 void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB, 218 MachineBasicBlock::iterator &MBBI, 219 const DebugLoc &DL, 220 int64_t NumBytes, bool InEpilogue) const { 221 bool isSub = NumBytes < 0; 222 uint64_t Offset = isSub ? -NumBytes : NumBytes; 223 MachineInstr::MIFlag Flag = 224 isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy; 225 226 uint64_t Chunk = (1LL << 31) - 1; 227 228 MachineFunction &MF = *MBB.getParent(); 229 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 230 const X86TargetLowering &TLI = *STI.getTargetLowering(); 231 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF); 232 233 // It's ok to not take into account large chunks when probing, as the 234 // allocation is split in smaller chunks anyway. 235 if (EmitInlineStackProbe && !InEpilogue) { 236 237 // This pseudo-instruction is going to be expanded, potentially using a 238 // loop, by inlineStackProbe(). 239 BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset); 240 return; 241 } else if (Offset > Chunk) { 242 // Rather than emit a long series of instructions for large offsets, 243 // load the offset into a register and do one sub/add 244 unsigned Reg = 0; 245 unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX); 246 247 if (isSub && !isEAXLiveIn(MBB)) 248 Reg = Rax; 249 else 250 Reg = TRI->findDeadCallerSavedReg(MBB, MBBI); 251 252 unsigned AddSubRROpc = 253 isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit); 254 if (Reg) { 255 BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Offset)), Reg) 256 .addImm(Offset) 257 .setMIFlag(Flag); 258 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr) 259 .addReg(StackPtr) 260 .addReg(Reg); 261 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 262 return; 263 } else if (Offset > 8 * Chunk) { 264 // If we would need more than 8 add or sub instructions (a >16GB stack 265 // frame), it's worth spilling RAX to materialize this immediate. 266 // pushq %rax 267 // movabsq +-$Offset+-SlotSize, %rax 268 // addq %rsp, %rax 269 // xchg %rax, (%rsp) 270 // movq (%rsp), %rsp 271 assert(Is64Bit && "can't have 32-bit 16GB stack frame"); 272 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r)) 273 .addReg(Rax, RegState::Kill) 274 .setMIFlag(Flag); 275 // Subtract is not commutative, so negate the offset and always use add. 276 // Subtract 8 less and add 8 more to account for the PUSH we just did. 277 if (isSub) 278 Offset = -(Offset - SlotSize); 279 else 280 Offset = Offset + SlotSize; 281 BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Offset)), Rax) 282 .addImm(Offset) 283 .setMIFlag(Flag); 284 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax) 285 .addReg(Rax) 286 .addReg(StackPtr); 287 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 288 // Exchange the new SP in RAX with the top of the stack. 289 addRegOffset( 290 BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax), 291 StackPtr, false, 0); 292 // Load new SP from the top of the stack into RSP. 293 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr), 294 StackPtr, false, 0); 295 return; 296 } 297 } 298 299 while (Offset) { 300 uint64_t ThisVal = std::min(Offset, Chunk); 301 if (ThisVal == SlotSize) { 302 // Use push / pop for slot sized adjustments as a size optimization. We 303 // need to find a dead register when using pop. 304 unsigned Reg = isSub 305 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX) 306 : TRI->findDeadCallerSavedReg(MBB, MBBI); 307 if (Reg) { 308 unsigned Opc = isSub 309 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r) 310 : (Is64Bit ? X86::POP64r : X86::POP32r); 311 BuildMI(MBB, MBBI, DL, TII.get(Opc)) 312 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub)) 313 .setMIFlag(Flag); 314 Offset -= ThisVal; 315 continue; 316 } 317 } 318 319 BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue) 320 .setMIFlag(Flag); 321 322 Offset -= ThisVal; 323 } 324 } 325 326 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment( 327 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 328 const DebugLoc &DL, int64_t Offset, bool InEpilogue) const { 329 assert(Offset != 0 && "zero offset stack adjustment requested"); 330 331 // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue 332 // is tricky. 333 bool UseLEA; 334 if (!InEpilogue) { 335 // Check if inserting the prologue at the beginning 336 // of MBB would require to use LEA operations. 337 // We need to use LEA operations if EFLAGS is live in, because 338 // it means an instruction will read it before it gets defined. 339 UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS); 340 } else { 341 // If we can use LEA for SP but we shouldn't, check that none 342 // of the terminators uses the eflags. Otherwise we will insert 343 // a ADD that will redefine the eflags and break the condition. 344 // Alternatively, we could move the ADD, but this may not be possible 345 // and is an optimization anyway. 346 UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent()); 347 if (UseLEA && !STI.useLeaForSP()) 348 UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB); 349 // If that assert breaks, that means we do not do the right thing 350 // in canUseAsEpilogue. 351 assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) && 352 "We shouldn't have allowed this insertion point"); 353 } 354 355 MachineInstrBuilder MI; 356 if (UseLEA) { 357 MI = addRegOffset(BuildMI(MBB, MBBI, DL, 358 TII.get(getLEArOpcode(Uses64BitFramePtr)), 359 StackPtr), 360 StackPtr, false, Offset); 361 } else { 362 bool IsSub = Offset < 0; 363 uint64_t AbsOffset = IsSub ? -Offset : Offset; 364 const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset) 365 : getADDriOpcode(Uses64BitFramePtr, AbsOffset); 366 MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) 367 .addReg(StackPtr) 368 .addImm(AbsOffset); 369 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 370 } 371 return MI; 372 } 373 374 int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB, 375 MachineBasicBlock::iterator &MBBI, 376 bool doMergeWithPrevious) const { 377 if ((doMergeWithPrevious && MBBI == MBB.begin()) || 378 (!doMergeWithPrevious && MBBI == MBB.end())) 379 return 0; 380 381 MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI; 382 383 PI = skipDebugInstructionsBackward(PI, MBB.begin()); 384 // It is assumed that ADD/SUB/LEA instruction is succeded by one CFI 385 // instruction, and that there are no DBG_VALUE or other instructions between 386 // ADD/SUB/LEA and its corresponding CFI instruction. 387 /* TODO: Add support for the case where there are multiple CFI instructions 388 below the ADD/SUB/LEA, e.g.: 389 ... 390 add 391 cfi_def_cfa_offset 392 cfi_offset 393 ... 394 */ 395 if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction()) 396 PI = std::prev(PI); 397 398 unsigned Opc = PI->getOpcode(); 399 int Offset = 0; 400 401 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || 402 Opc == X86::ADD32ri || Opc == X86::ADD32ri8) && 403 PI->getOperand(0).getReg() == StackPtr){ 404 assert(PI->getOperand(1).getReg() == StackPtr); 405 Offset = PI->getOperand(2).getImm(); 406 } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) && 407 PI->getOperand(0).getReg() == StackPtr && 408 PI->getOperand(1).getReg() == StackPtr && 409 PI->getOperand(2).getImm() == 1 && 410 PI->getOperand(3).getReg() == X86::NoRegister && 411 PI->getOperand(5).getReg() == X86::NoRegister) { 412 // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg. 413 Offset = PI->getOperand(4).getImm(); 414 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 || 415 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) && 416 PI->getOperand(0).getReg() == StackPtr) { 417 assert(PI->getOperand(1).getReg() == StackPtr); 418 Offset = -PI->getOperand(2).getImm(); 419 } else 420 return 0; 421 422 PI = MBB.erase(PI); 423 if (PI != MBB.end() && PI->isCFIInstruction()) { 424 auto CIs = MBB.getParent()->getFrameInstructions(); 425 MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()]; 426 if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset || 427 CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset) 428 PI = MBB.erase(PI); 429 } 430 if (!doMergeWithPrevious) 431 MBBI = skipDebugInstructionsForward(PI, MBB.end()); 432 433 return Offset; 434 } 435 436 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB, 437 MachineBasicBlock::iterator MBBI, 438 const DebugLoc &DL, 439 const MCCFIInstruction &CFIInst) const { 440 MachineFunction &MF = *MBB.getParent(); 441 unsigned CFIIndex = MF.addFrameInst(CFIInst); 442 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 443 .addCFIIndex(CFIIndex); 444 } 445 446 /// Emits Dwarf Info specifying offsets of callee saved registers and 447 /// frame pointer. This is called only when basic block sections are enabled. 448 void X86FrameLowering::emitCalleeSavedFrameMovesFullCFA( 449 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 450 MachineFunction &MF = *MBB.getParent(); 451 if (!hasFP(MF)) { 452 emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true); 453 return; 454 } 455 const MachineModuleInfo &MMI = MF.getMMI(); 456 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); 457 const Register FramePtr = TRI->getFrameRegister(MF); 458 const Register MachineFramePtr = 459 STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64)) 460 : FramePtr; 461 unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true); 462 // Offset = space for return address + size of the frame pointer itself. 463 unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4); 464 BuildCFI(MBB, MBBI, DebugLoc{}, 465 MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset)); 466 emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true); 467 } 468 469 void X86FrameLowering::emitCalleeSavedFrameMoves( 470 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 471 const DebugLoc &DL, bool IsPrologue) const { 472 MachineFunction &MF = *MBB.getParent(); 473 MachineFrameInfo &MFI = MF.getFrameInfo(); 474 MachineModuleInfo &MMI = MF.getMMI(); 475 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); 476 477 // Add callee saved registers to move list. 478 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 479 480 // Calculate offsets. 481 for (const CalleeSavedInfo &I : CSI) { 482 int64_t Offset = MFI.getObjectOffset(I.getFrameIdx()); 483 Register Reg = I.getReg(); 484 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); 485 486 if (IsPrologue) { 487 BuildCFI(MBB, MBBI, DL, 488 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset)); 489 } else { 490 BuildCFI(MBB, MBBI, DL, 491 MCCFIInstruction::createRestore(nullptr, DwarfReg)); 492 } 493 } 494 } 495 496 void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero, 497 MachineBasicBlock &MBB) const { 498 const MachineFunction &MF = *MBB.getParent(); 499 500 // Don't clear registers that will just be reset before exiting. 501 for (const CalleeSavedInfo &CSI : MF.getFrameInfo().getCalleeSavedInfo()) 502 for (MCRegister Reg : TRI->sub_and_superregs_inclusive(CSI.getReg())) 503 RegsToZero.reset(Reg); 504 505 // Insertion point. 506 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 507 508 // We don't need to zero out registers that are clobbered by "pop" 509 // instructions. 510 for (MachineBasicBlock::iterator I = MBBI, E = MBB.end(); I != E; ++I) 511 for (const MachineOperand &MO : I->operands()) { 512 if (!MO.isReg()) 513 continue; 514 515 for (const MCPhysReg &Reg : TRI->sub_and_superregs_inclusive(MO.getReg())) 516 RegsToZero.reset(Reg); 517 } 518 519 // Fake a debug loc. 520 DebugLoc DL; 521 if (MBBI != MBB.end()) 522 DL = MBBI->getDebugLoc(); 523 524 // Zero out FP stack if referenced. Do this outside of the loop below so that 525 // it's done only once. 526 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>(); 527 for (MCRegister Reg : RegsToZero.set_bits()) { 528 if (!X86::RFP80RegClass.contains(Reg)) 529 continue; 530 531 unsigned NumFPRegs = ST.is64Bit() ? 8 : 7; 532 for (unsigned i = 0; i != NumFPRegs; ++i) 533 BuildMI(MBB, MBBI, DL, TII.get(X86::LD_F0)); 534 535 for (unsigned i = 0; i != NumFPRegs; ++i) 536 BuildMI(MBB, MBBI, DL, TII.get(X86::ST_FPrr)).addReg(X86::ST0); 537 break; 538 } 539 540 // For GPRs, we only care to clear out the 32-bit register. 541 for (MCRegister Reg : RegsToZero.set_bits()) 542 if (TRI->isGeneralPurposeRegister(MF, Reg)) { 543 Reg = getX86SubSuperRegisterOrZero(Reg, 32); 544 for (const MCPhysReg &Reg : TRI->sub_and_superregs_inclusive(Reg)) 545 RegsToZero.reset(Reg); 546 RegsToZero.set(Reg); 547 } 548 549 // Zero out registers. 550 for (MCRegister Reg : RegsToZero.set_bits()) { 551 if (ST.hasMMX() && X86::VR64RegClass.contains(Reg)) 552 // FIXME: Ignore MMX registers? 553 continue; 554 555 unsigned XorOp; 556 if (TRI->isGeneralPurposeRegister(MF, Reg)) { 557 XorOp = X86::XOR32rr; 558 } else if (X86::VR128RegClass.contains(Reg)) { 559 // XMM# 560 if (!ST.hasSSE1()) 561 continue; 562 XorOp = X86::PXORrr; 563 } else if (X86::VR256RegClass.contains(Reg)) { 564 // YMM# 565 if (!ST.hasAVX()) 566 continue; 567 XorOp = X86::VPXORrr; 568 } else if (X86::VR512RegClass.contains(Reg)) { 569 // ZMM# 570 if (!ST.hasAVX512()) 571 continue; 572 XorOp = X86::VPXORYrr; 573 } else if (X86::VK1RegClass.contains(Reg) || 574 X86::VK2RegClass.contains(Reg) || 575 X86::VK4RegClass.contains(Reg) || 576 X86::VK8RegClass.contains(Reg) || 577 X86::VK16RegClass.contains(Reg)) { 578 if (!ST.hasVLX()) 579 continue; 580 XorOp = ST.hasBWI() ? X86::KXORQrr : X86::KXORWrr; 581 } else { 582 continue; 583 } 584 585 BuildMI(MBB, MBBI, DL, TII.get(XorOp), Reg) 586 .addReg(Reg, RegState::Undef) 587 .addReg(Reg, RegState::Undef); 588 } 589 } 590 591 void X86FrameLowering::emitStackProbe( 592 MachineFunction &MF, MachineBasicBlock &MBB, 593 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, 594 Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const { 595 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 596 if (STI.isTargetWindowsCoreCLR()) { 597 if (InProlog) { 598 BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)) 599 .addImm(0 /* no explicit stack size */); 600 } else { 601 emitStackProbeInline(MF, MBB, MBBI, DL, false); 602 } 603 } else { 604 emitStackProbeCall(MF, MBB, MBBI, DL, InProlog, InstrNum); 605 } 606 } 607 608 bool X86FrameLowering::stackProbeFunctionModifiesSP() const { 609 return STI.isOSWindows() && !STI.isTargetWin64(); 610 } 611 612 void X86FrameLowering::inlineStackProbe(MachineFunction &MF, 613 MachineBasicBlock &PrologMBB) const { 614 auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) { 615 return MI.getOpcode() == X86::STACKALLOC_W_PROBING; 616 }); 617 if (Where != PrologMBB.end()) { 618 DebugLoc DL = PrologMBB.findDebugLoc(Where); 619 emitStackProbeInline(MF, PrologMBB, Where, DL, true); 620 Where->eraseFromParent(); 621 } 622 } 623 624 void X86FrameLowering::emitStackProbeInline(MachineFunction &MF, 625 MachineBasicBlock &MBB, 626 MachineBasicBlock::iterator MBBI, 627 const DebugLoc &DL, 628 bool InProlog) const { 629 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 630 if (STI.isTargetWindowsCoreCLR() && STI.is64Bit()) 631 emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog); 632 else 633 emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog); 634 } 635 636 void X86FrameLowering::emitStackProbeInlineGeneric( 637 MachineFunction &MF, MachineBasicBlock &MBB, 638 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const { 639 MachineInstr &AllocWithProbe = *MBBI; 640 uint64_t Offset = AllocWithProbe.getOperand(0).getImm(); 641 642 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 643 const X86TargetLowering &TLI = *STI.getTargetLowering(); 644 assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) && 645 "different expansion expected for CoreCLR 64 bit"); 646 647 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); 648 uint64_t ProbeChunk = StackProbeSize * 8; 649 650 uint64_t MaxAlign = 651 TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0; 652 653 // Synthesize a loop or unroll it, depending on the number of iterations. 654 // BuildStackAlignAND ensures that only MaxAlign % StackProbeSize bits left 655 // between the unaligned rsp and current rsp. 656 if (Offset > ProbeChunk) { 657 emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset, 658 MaxAlign % StackProbeSize); 659 } else { 660 emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset, 661 MaxAlign % StackProbeSize); 662 } 663 } 664 665 void X86FrameLowering::emitStackProbeInlineGenericBlock( 666 MachineFunction &MF, MachineBasicBlock &MBB, 667 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset, 668 uint64_t AlignOffset) const { 669 670 const bool NeedsDwarfCFI = needsDwarfCFI(MF); 671 const bool HasFP = hasFP(MF); 672 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 673 const X86TargetLowering &TLI = *STI.getTargetLowering(); 674 const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset); 675 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; 676 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); 677 678 uint64_t CurrentOffset = 0; 679 680 assert(AlignOffset < StackProbeSize); 681 682 // If the offset is so small it fits within a page, there's nothing to do. 683 if (StackProbeSize < Offset + AlignOffset) { 684 685 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) 686 .addReg(StackPtr) 687 .addImm(StackProbeSize - AlignOffset) 688 .setMIFlag(MachineInstr::FrameSetup); 689 if (!HasFP && NeedsDwarfCFI) { 690 BuildCFI(MBB, MBBI, DL, 691 MCCFIInstruction::createAdjustCfaOffset( 692 nullptr, StackProbeSize - AlignOffset)); 693 } 694 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 695 696 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) 697 .setMIFlag(MachineInstr::FrameSetup), 698 StackPtr, false, 0) 699 .addImm(0) 700 .setMIFlag(MachineInstr::FrameSetup); 701 NumFrameExtraProbe++; 702 CurrentOffset = StackProbeSize - AlignOffset; 703 } 704 705 // For the next N - 1 pages, just probe. I tried to take advantage of 706 // natural probes but it implies much more logic and there was very few 707 // interesting natural probes to interleave. 708 while (CurrentOffset + StackProbeSize < Offset) { 709 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) 710 .addReg(StackPtr) 711 .addImm(StackProbeSize) 712 .setMIFlag(MachineInstr::FrameSetup); 713 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 714 715 if (!HasFP && NeedsDwarfCFI) { 716 BuildCFI( 717 MBB, MBBI, DL, 718 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize)); 719 } 720 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) 721 .setMIFlag(MachineInstr::FrameSetup), 722 StackPtr, false, 0) 723 .addImm(0) 724 .setMIFlag(MachineInstr::FrameSetup); 725 NumFrameExtraProbe++; 726 CurrentOffset += StackProbeSize; 727 } 728 729 // No need to probe the tail, it is smaller than a Page. 730 uint64_t ChunkSize = Offset - CurrentOffset; 731 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) 732 .addReg(StackPtr) 733 .addImm(ChunkSize) 734 .setMIFlag(MachineInstr::FrameSetup); 735 // No need to adjust Dwarf CFA offset here, the last position of the stack has 736 // been defined 737 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 738 } 739 740 void X86FrameLowering::emitStackProbeInlineGenericLoop( 741 MachineFunction &MF, MachineBasicBlock &MBB, 742 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset, 743 uint64_t AlignOffset) const { 744 assert(Offset && "null offset"); 745 746 const bool NeedsDwarfCFI = needsDwarfCFI(MF); 747 const bool HasFP = hasFP(MF); 748 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 749 const X86TargetLowering &TLI = *STI.getTargetLowering(); 750 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; 751 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); 752 753 if (AlignOffset) { 754 if (AlignOffset < StackProbeSize) { 755 // Perform a first smaller allocation followed by a probe. 756 const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset); 757 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr) 758 .addReg(StackPtr) 759 .addImm(AlignOffset) 760 .setMIFlag(MachineInstr::FrameSetup); 761 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 762 763 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) 764 .setMIFlag(MachineInstr::FrameSetup), 765 StackPtr, false, 0) 766 .addImm(0) 767 .setMIFlag(MachineInstr::FrameSetup); 768 NumFrameExtraProbe++; 769 Offset -= AlignOffset; 770 } 771 } 772 773 // Synthesize a loop 774 NumFrameLoopProbe++; 775 const BasicBlock *LLVM_BB = MBB.getBasicBlock(); 776 777 MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB); 778 MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB); 779 780 MachineFunction::iterator MBBIter = ++MBB.getIterator(); 781 MF.insert(MBBIter, testMBB); 782 MF.insert(MBBIter, tailMBB); 783 784 Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 785 : Is64Bit ? X86::R11D 786 : X86::EAX; 787 788 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed) 789 .addReg(StackPtr) 790 .setMIFlag(MachineInstr::FrameSetup); 791 792 // save loop bound 793 { 794 const unsigned BoundOffset = alignDown(Offset, StackProbeSize); 795 const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, BoundOffset); 796 BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed) 797 .addReg(FinalStackProbed) 798 .addImm(BoundOffset) 799 .setMIFlag(MachineInstr::FrameSetup); 800 801 // while in the loop, use loop-invariant reg for CFI, 802 // instead of the stack pointer, which changes during the loop 803 if (!HasFP && NeedsDwarfCFI) { 804 // x32 uses the same DWARF register numbers as x86-64, 805 // so there isn't a register number for r11d, we must use r11 instead 806 const Register DwarfFinalStackProbed = 807 STI.isTarget64BitILP32() 808 ? Register(getX86SubSuperRegister(FinalStackProbed, 64)) 809 : FinalStackProbed; 810 811 BuildCFI(MBB, MBBI, DL, 812 MCCFIInstruction::createDefCfaRegister( 813 nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true))); 814 BuildCFI(MBB, MBBI, DL, 815 MCCFIInstruction::createAdjustCfaOffset(nullptr, BoundOffset)); 816 } 817 } 818 819 // allocate a page 820 { 821 const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); 822 BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr) 823 .addReg(StackPtr) 824 .addImm(StackProbeSize) 825 .setMIFlag(MachineInstr::FrameSetup); 826 } 827 828 // touch the page 829 addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc)) 830 .setMIFlag(MachineInstr::FrameSetup), 831 StackPtr, false, 0) 832 .addImm(0) 833 .setMIFlag(MachineInstr::FrameSetup); 834 835 // cmp with stack pointer bound 836 BuildMI(testMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) 837 .addReg(StackPtr) 838 .addReg(FinalStackProbed) 839 .setMIFlag(MachineInstr::FrameSetup); 840 841 // jump 842 BuildMI(testMBB, DL, TII.get(X86::JCC_1)) 843 .addMBB(testMBB) 844 .addImm(X86::COND_NE) 845 .setMIFlag(MachineInstr::FrameSetup); 846 testMBB->addSuccessor(testMBB); 847 testMBB->addSuccessor(tailMBB); 848 849 // BB management 850 tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end()); 851 tailMBB->transferSuccessorsAndUpdatePHIs(&MBB); 852 MBB.addSuccessor(testMBB); 853 854 // handle tail 855 const unsigned TailOffset = Offset % StackProbeSize; 856 MachineBasicBlock::iterator TailMBBIter = tailMBB->begin(); 857 if (TailOffset) { 858 const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset); 859 BuildMI(*tailMBB, TailMBBIter, DL, TII.get(Opc), StackPtr) 860 .addReg(StackPtr) 861 .addImm(TailOffset) 862 .setMIFlag(MachineInstr::FrameSetup); 863 } 864 865 // after the loop, switch back to stack pointer for CFI 866 if (!HasFP && NeedsDwarfCFI) { 867 // x32 uses the same DWARF register numbers as x86-64, 868 // so there isn't a register number for esp, we must use rsp instead 869 const Register DwarfStackPtr = 870 STI.isTarget64BitILP32() 871 ? Register(getX86SubSuperRegister(StackPtr, 64)) 872 : Register(StackPtr); 873 874 BuildCFI(*tailMBB, TailMBBIter, DL, 875 MCCFIInstruction::createDefCfaRegister( 876 nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true))); 877 } 878 879 // Update Live In information 880 recomputeLiveIns(*testMBB); 881 recomputeLiveIns(*tailMBB); 882 } 883 884 void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64( 885 MachineFunction &MF, MachineBasicBlock &MBB, 886 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const { 887 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 888 assert(STI.is64Bit() && "different expansion needed for 32 bit"); 889 assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR"); 890 const TargetInstrInfo &TII = *STI.getInstrInfo(); 891 const BasicBlock *LLVM_BB = MBB.getBasicBlock(); 892 893 // RAX contains the number of bytes of desired stack adjustment. 894 // The handling here assumes this value has already been updated so as to 895 // maintain stack alignment. 896 // 897 // We need to exit with RSP modified by this amount and execute suitable 898 // page touches to notify the OS that we're growing the stack responsibly. 899 // All stack probing must be done without modifying RSP. 900 // 901 // MBB: 902 // SizeReg = RAX; 903 // ZeroReg = 0 904 // CopyReg = RSP 905 // Flags, TestReg = CopyReg - SizeReg 906 // FinalReg = !Flags.Ovf ? TestReg : ZeroReg 907 // LimitReg = gs magic thread env access 908 // if FinalReg >= LimitReg goto ContinueMBB 909 // RoundBB: 910 // RoundReg = page address of FinalReg 911 // LoopMBB: 912 // LoopReg = PHI(LimitReg,ProbeReg) 913 // ProbeReg = LoopReg - PageSize 914 // [ProbeReg] = 0 915 // if (ProbeReg > RoundReg) goto LoopMBB 916 // ContinueMBB: 917 // RSP = RSP - RAX 918 // [rest of original MBB] 919 920 // Set up the new basic blocks 921 MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB); 922 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 923 MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB); 924 925 MachineFunction::iterator MBBIter = std::next(MBB.getIterator()); 926 MF.insert(MBBIter, RoundMBB); 927 MF.insert(MBBIter, LoopMBB); 928 MF.insert(MBBIter, ContinueMBB); 929 930 // Split MBB and move the tail portion down to ContinueMBB. 931 MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI); 932 ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end()); 933 ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB); 934 935 // Some useful constants 936 const int64_t ThreadEnvironmentStackLimit = 0x10; 937 const int64_t PageSize = 0x1000; 938 const int64_t PageMask = ~(PageSize - 1); 939 940 // Registers we need. For the normal case we use virtual 941 // registers. For the prolog expansion we use RAX, RCX and RDX. 942 MachineRegisterInfo &MRI = MF.getRegInfo(); 943 const TargetRegisterClass *RegClass = &X86::GR64RegClass; 944 const Register SizeReg = InProlog ? X86::RAX 945 : MRI.createVirtualRegister(RegClass), 946 ZeroReg = InProlog ? X86::RCX 947 : MRI.createVirtualRegister(RegClass), 948 CopyReg = InProlog ? X86::RDX 949 : MRI.createVirtualRegister(RegClass), 950 TestReg = InProlog ? X86::RDX 951 : MRI.createVirtualRegister(RegClass), 952 FinalReg = InProlog ? X86::RDX 953 : MRI.createVirtualRegister(RegClass), 954 RoundedReg = InProlog ? X86::RDX 955 : MRI.createVirtualRegister(RegClass), 956 LimitReg = InProlog ? X86::RCX 957 : MRI.createVirtualRegister(RegClass), 958 JoinReg = InProlog ? X86::RCX 959 : MRI.createVirtualRegister(RegClass), 960 ProbeReg = InProlog ? X86::RCX 961 : MRI.createVirtualRegister(RegClass); 962 963 // SP-relative offsets where we can save RCX and RDX. 964 int64_t RCXShadowSlot = 0; 965 int64_t RDXShadowSlot = 0; 966 967 // If inlining in the prolog, save RCX and RDX. 968 if (InProlog) { 969 // Compute the offsets. We need to account for things already 970 // pushed onto the stack at this point: return address, frame 971 // pointer (if used), and callee saves. 972 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 973 const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize(); 974 const bool HasFP = hasFP(MF); 975 976 // Check if we need to spill RCX and/or RDX. 977 // Here we assume that no earlier prologue instruction changes RCX and/or 978 // RDX, so checking the block live-ins is enough. 979 const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX); 980 const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX); 981 int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0); 982 // Assign the initial slot to both registers, then change RDX's slot if both 983 // need to be spilled. 984 if (IsRCXLiveIn) 985 RCXShadowSlot = InitSlot; 986 if (IsRDXLiveIn) 987 RDXShadowSlot = InitSlot; 988 if (IsRDXLiveIn && IsRCXLiveIn) 989 RDXShadowSlot += 8; 990 // Emit the saves if needed. 991 if (IsRCXLiveIn) 992 addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false, 993 RCXShadowSlot) 994 .addReg(X86::RCX); 995 if (IsRDXLiveIn) 996 addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false, 997 RDXShadowSlot) 998 .addReg(X86::RDX); 999 } else { 1000 // Not in the prolog. Copy RAX to a virtual reg. 1001 BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX); 1002 } 1003 1004 // Add code to MBB to check for overflow and set the new target stack pointer 1005 // to zero if so. 1006 BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg) 1007 .addReg(ZeroReg, RegState::Undef) 1008 .addReg(ZeroReg, RegState::Undef); 1009 BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP); 1010 BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg) 1011 .addReg(CopyReg) 1012 .addReg(SizeReg); 1013 BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg) 1014 .addReg(TestReg) 1015 .addReg(ZeroReg) 1016 .addImm(X86::COND_B); 1017 1018 // FinalReg now holds final stack pointer value, or zero if 1019 // allocation would overflow. Compare against the current stack 1020 // limit from the thread environment block. Note this limit is the 1021 // lowest touched page on the stack, not the point at which the OS 1022 // will cause an overflow exception, so this is just an optimization 1023 // to avoid unnecessarily touching pages that are below the current 1024 // SP but already committed to the stack by the OS. 1025 BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg) 1026 .addReg(0) 1027 .addImm(1) 1028 .addReg(0) 1029 .addImm(ThreadEnvironmentStackLimit) 1030 .addReg(X86::GS); 1031 BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg); 1032 // Jump if the desired stack pointer is at or above the stack limit. 1033 BuildMI(&MBB, DL, TII.get(X86::JCC_1)).addMBB(ContinueMBB).addImm(X86::COND_AE); 1034 1035 // Add code to roundMBB to round the final stack pointer to a page boundary. 1036 RoundMBB->addLiveIn(FinalReg); 1037 BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg) 1038 .addReg(FinalReg) 1039 .addImm(PageMask); 1040 BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB); 1041 1042 // LimitReg now holds the current stack limit, RoundedReg page-rounded 1043 // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page 1044 // and probe until we reach RoundedReg. 1045 if (!InProlog) { 1046 BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg) 1047 .addReg(LimitReg) 1048 .addMBB(RoundMBB) 1049 .addReg(ProbeReg) 1050 .addMBB(LoopMBB); 1051 } 1052 1053 LoopMBB->addLiveIn(JoinReg); 1054 addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg, 1055 false, -PageSize); 1056 1057 // Probe by storing a byte onto the stack. 1058 BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi)) 1059 .addReg(ProbeReg) 1060 .addImm(1) 1061 .addReg(0) 1062 .addImm(0) 1063 .addReg(0) 1064 .addImm(0); 1065 1066 LoopMBB->addLiveIn(RoundedReg); 1067 BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr)) 1068 .addReg(RoundedReg) 1069 .addReg(ProbeReg); 1070 BuildMI(LoopMBB, DL, TII.get(X86::JCC_1)).addMBB(LoopMBB).addImm(X86::COND_NE); 1071 1072 MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI(); 1073 1074 // If in prolog, restore RDX and RCX. 1075 if (InProlog) { 1076 if (RCXShadowSlot) // It means we spilled RCX in the prologue. 1077 addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, 1078 TII.get(X86::MOV64rm), X86::RCX), 1079 X86::RSP, false, RCXShadowSlot); 1080 if (RDXShadowSlot) // It means we spilled RDX in the prologue. 1081 addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, 1082 TII.get(X86::MOV64rm), X86::RDX), 1083 X86::RSP, false, RDXShadowSlot); 1084 } 1085 1086 // Now that the probing is done, add code to continueMBB to update 1087 // the stack pointer for real. 1088 ContinueMBB->addLiveIn(SizeReg); 1089 BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP) 1090 .addReg(X86::RSP) 1091 .addReg(SizeReg); 1092 1093 // Add the control flow edges we need. 1094 MBB.addSuccessor(ContinueMBB); 1095 MBB.addSuccessor(RoundMBB); 1096 RoundMBB->addSuccessor(LoopMBB); 1097 LoopMBB->addSuccessor(ContinueMBB); 1098 LoopMBB->addSuccessor(LoopMBB); 1099 1100 // Mark all the instructions added to the prolog as frame setup. 1101 if (InProlog) { 1102 for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) { 1103 BeforeMBBI->setFlag(MachineInstr::FrameSetup); 1104 } 1105 for (MachineInstr &MI : *RoundMBB) { 1106 MI.setFlag(MachineInstr::FrameSetup); 1107 } 1108 for (MachineInstr &MI : *LoopMBB) { 1109 MI.setFlag(MachineInstr::FrameSetup); 1110 } 1111 for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin(); 1112 CMBBI != ContinueMBBI; ++CMBBI) { 1113 CMBBI->setFlag(MachineInstr::FrameSetup); 1114 } 1115 } 1116 } 1117 1118 void X86FrameLowering::emitStackProbeCall( 1119 MachineFunction &MF, MachineBasicBlock &MBB, 1120 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, 1121 Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const { 1122 bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large; 1123 1124 // FIXME: Add indirect thunk support and remove this. 1125 if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls()) 1126 report_fatal_error("Emitting stack probe calls on 64-bit with the large " 1127 "code model and indirect thunks not yet implemented."); 1128 1129 unsigned CallOp; 1130 if (Is64Bit) 1131 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32; 1132 else 1133 CallOp = X86::CALLpcrel32; 1134 1135 StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF); 1136 1137 MachineInstrBuilder CI; 1138 MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI); 1139 1140 // All current stack probes take AX and SP as input, clobber flags, and 1141 // preserve all registers. x86_64 probes leave RSP unmodified. 1142 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) { 1143 // For the large code model, we have to call through a register. Use R11, 1144 // as it is scratch in all supported calling conventions. 1145 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11) 1146 .addExternalSymbol(MF.createExternalSymbolName(Symbol)); 1147 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11); 1148 } else { 1149 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)) 1150 .addExternalSymbol(MF.createExternalSymbolName(Symbol)); 1151 } 1152 1153 unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX; 1154 unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP; 1155 CI.addReg(AX, RegState::Implicit) 1156 .addReg(SP, RegState::Implicit) 1157 .addReg(AX, RegState::Define | RegState::Implicit) 1158 .addReg(SP, RegState::Define | RegState::Implicit) 1159 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 1160 1161 MachineInstr *ModInst = CI; 1162 if (STI.isTargetWin64() || !STI.isOSWindows()) { 1163 // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves. 1164 // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp 1165 // themselves. They also does not clobber %rax so we can reuse it when 1166 // adjusting %rsp. 1167 // All other platforms do not specify a particular ABI for the stack probe 1168 // function, so we arbitrarily define it to not adjust %esp/%rsp itself. 1169 ModInst = 1170 BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP) 1171 .addReg(SP) 1172 .addReg(AX); 1173 } 1174 1175 // DebugInfo variable locations -- if there's an instruction number for the 1176 // allocation (i.e., DYN_ALLOC_*), substitute it for the instruction that 1177 // modifies SP. 1178 if (InstrNum) { 1179 if (STI.isTargetWin64() || !STI.isOSWindows()) { 1180 // Label destination operand of the subtract. 1181 MF.makeDebugValueSubstitution(*InstrNum, 1182 {ModInst->getDebugInstrNum(), 0}); 1183 } else { 1184 // Label the call. The operand number is the penultimate operand, zero 1185 // based. 1186 unsigned SPDefOperand = ModInst->getNumOperands() - 2; 1187 MF.makeDebugValueSubstitution( 1188 *InstrNum, {ModInst->getDebugInstrNum(), SPDefOperand}); 1189 } 1190 } 1191 1192 if (InProlog) { 1193 // Apply the frame setup flag to all inserted instrs. 1194 for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI) 1195 ExpansionMBBI->setFlag(MachineInstr::FrameSetup); 1196 } 1197 } 1198 1199 static unsigned calculateSetFPREG(uint64_t SPAdjust) { 1200 // Win64 ABI has a less restrictive limitation of 240; 128 works equally well 1201 // and might require smaller successive adjustments. 1202 const uint64_t Win64MaxSEHOffset = 128; 1203 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset); 1204 // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode. 1205 return SEHFrameOffset & -16; 1206 } 1207 1208 // If we're forcing a stack realignment we can't rely on just the frame 1209 // info, we need to know the ABI stack alignment as well in case we 1210 // have a call out. Otherwise just make sure we have some alignment - we'll 1211 // go with the minimum SlotSize. 1212 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const { 1213 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1214 Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment. 1215 Align StackAlign = getStackAlign(); 1216 if (MF.getFunction().hasFnAttribute("stackrealign")) { 1217 if (MFI.hasCalls()) 1218 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign; 1219 else if (MaxAlign < SlotSize) 1220 MaxAlign = Align(SlotSize); 1221 } 1222 return MaxAlign.value(); 1223 } 1224 1225 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB, 1226 MachineBasicBlock::iterator MBBI, 1227 const DebugLoc &DL, unsigned Reg, 1228 uint64_t MaxAlign) const { 1229 uint64_t Val = -MaxAlign; 1230 unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val); 1231 1232 MachineFunction &MF = *MBB.getParent(); 1233 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 1234 const X86TargetLowering &TLI = *STI.getTargetLowering(); 1235 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); 1236 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF); 1237 1238 // We want to make sure that (in worst case) less than StackProbeSize bytes 1239 // are not probed after the AND. This assumption is used in 1240 // emitStackProbeInlineGeneric. 1241 if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) { 1242 { 1243 NumFrameLoopProbe++; 1244 MachineBasicBlock *entryMBB = 1245 MF.CreateMachineBasicBlock(MBB.getBasicBlock()); 1246 MachineBasicBlock *headMBB = 1247 MF.CreateMachineBasicBlock(MBB.getBasicBlock()); 1248 MachineBasicBlock *bodyMBB = 1249 MF.CreateMachineBasicBlock(MBB.getBasicBlock()); 1250 MachineBasicBlock *footMBB = 1251 MF.CreateMachineBasicBlock(MBB.getBasicBlock()); 1252 1253 MachineFunction::iterator MBBIter = MBB.getIterator(); 1254 MF.insert(MBBIter, entryMBB); 1255 MF.insert(MBBIter, headMBB); 1256 MF.insert(MBBIter, bodyMBB); 1257 MF.insert(MBBIter, footMBB); 1258 const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; 1259 Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 1260 : Is64Bit ? X86::R11D 1261 : X86::EAX; 1262 1263 // Setup entry block 1264 { 1265 1266 entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI); 1267 BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed) 1268 .addReg(StackPtr) 1269 .setMIFlag(MachineInstr::FrameSetup); 1270 MachineInstr *MI = 1271 BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed) 1272 .addReg(FinalStackProbed) 1273 .addImm(Val) 1274 .setMIFlag(MachineInstr::FrameSetup); 1275 1276 // The EFLAGS implicit def is dead. 1277 MI->getOperand(3).setIsDead(); 1278 1279 BuildMI(entryMBB, DL, 1280 TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) 1281 .addReg(FinalStackProbed) 1282 .addReg(StackPtr) 1283 .setMIFlag(MachineInstr::FrameSetup); 1284 BuildMI(entryMBB, DL, TII.get(X86::JCC_1)) 1285 .addMBB(&MBB) 1286 .addImm(X86::COND_E) 1287 .setMIFlag(MachineInstr::FrameSetup); 1288 entryMBB->addSuccessor(headMBB); 1289 entryMBB->addSuccessor(&MBB); 1290 } 1291 1292 // Loop entry block 1293 1294 { 1295 const unsigned SUBOpc = 1296 getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); 1297 BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr) 1298 .addReg(StackPtr) 1299 .addImm(StackProbeSize) 1300 .setMIFlag(MachineInstr::FrameSetup); 1301 1302 BuildMI(headMBB, DL, 1303 TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) 1304 .addReg(FinalStackProbed) 1305 .addReg(StackPtr) 1306 .setMIFlag(MachineInstr::FrameSetup); 1307 1308 // jump 1309 BuildMI(headMBB, DL, TII.get(X86::JCC_1)) 1310 .addMBB(footMBB) 1311 .addImm(X86::COND_B) 1312 .setMIFlag(MachineInstr::FrameSetup); 1313 1314 headMBB->addSuccessor(bodyMBB); 1315 headMBB->addSuccessor(footMBB); 1316 } 1317 1318 // setup loop body 1319 { 1320 addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc)) 1321 .setMIFlag(MachineInstr::FrameSetup), 1322 StackPtr, false, 0) 1323 .addImm(0) 1324 .setMIFlag(MachineInstr::FrameSetup); 1325 1326 const unsigned SUBOpc = 1327 getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); 1328 BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr) 1329 .addReg(StackPtr) 1330 .addImm(StackProbeSize) 1331 .setMIFlag(MachineInstr::FrameSetup); 1332 1333 // cmp with stack pointer bound 1334 BuildMI(bodyMBB, DL, 1335 TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) 1336 .addReg(FinalStackProbed) 1337 .addReg(StackPtr) 1338 .setMIFlag(MachineInstr::FrameSetup); 1339 1340 // jump 1341 BuildMI(bodyMBB, DL, TII.get(X86::JCC_1)) 1342 .addMBB(bodyMBB) 1343 .addImm(X86::COND_B) 1344 .setMIFlag(MachineInstr::FrameSetup); 1345 bodyMBB->addSuccessor(bodyMBB); 1346 bodyMBB->addSuccessor(footMBB); 1347 } 1348 1349 // setup loop footer 1350 { 1351 BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr) 1352 .addReg(FinalStackProbed) 1353 .setMIFlag(MachineInstr::FrameSetup); 1354 addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc)) 1355 .setMIFlag(MachineInstr::FrameSetup), 1356 StackPtr, false, 0) 1357 .addImm(0) 1358 .setMIFlag(MachineInstr::FrameSetup); 1359 footMBB->addSuccessor(&MBB); 1360 } 1361 1362 recomputeLiveIns(*headMBB); 1363 recomputeLiveIns(*bodyMBB); 1364 recomputeLiveIns(*footMBB); 1365 recomputeLiveIns(MBB); 1366 } 1367 } else { 1368 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg) 1369 .addReg(Reg) 1370 .addImm(Val) 1371 .setMIFlag(MachineInstr::FrameSetup); 1372 1373 // The EFLAGS implicit def is dead. 1374 MI->getOperand(3).setIsDead(); 1375 } 1376 } 1377 1378 bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const { 1379 // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be 1380 // clobbered by any interrupt handler. 1381 assert(&STI == &MF.getSubtarget<X86Subtarget>() && 1382 "MF used frame lowering for wrong subtarget"); 1383 const Function &Fn = MF.getFunction(); 1384 const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv()); 1385 return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone); 1386 } 1387 1388 /// Return true if we need to use the restricted Windows x64 prologue and 1389 /// epilogue code patterns that can be described with WinCFI (.seh_* 1390 /// directives). 1391 bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const { 1392 return MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 1393 } 1394 1395 bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const { 1396 return !isWin64Prologue(MF) && MF.needsFrameMoves(); 1397 } 1398 1399 /// emitPrologue - Push callee-saved registers onto the stack, which 1400 /// automatically adjust the stack pointer. Adjust the stack pointer to allocate 1401 /// space for local variables. Also emit labels used by the exception handler to 1402 /// generate the exception handling frames. 1403 1404 /* 1405 Here's a gist of what gets emitted: 1406 1407 ; Establish frame pointer, if needed 1408 [if needs FP] 1409 push %rbp 1410 .cfi_def_cfa_offset 16 1411 .cfi_offset %rbp, -16 1412 .seh_pushreg %rpb 1413 mov %rsp, %rbp 1414 .cfi_def_cfa_register %rbp 1415 1416 ; Spill general-purpose registers 1417 [for all callee-saved GPRs] 1418 pushq %<reg> 1419 [if not needs FP] 1420 .cfi_def_cfa_offset (offset from RETADDR) 1421 .seh_pushreg %<reg> 1422 1423 ; If the required stack alignment > default stack alignment 1424 ; rsp needs to be re-aligned. This creates a "re-alignment gap" 1425 ; of unknown size in the stack frame. 1426 [if stack needs re-alignment] 1427 and $MASK, %rsp 1428 1429 ; Allocate space for locals 1430 [if target is Windows and allocated space > 4096 bytes] 1431 ; Windows needs special care for allocations larger 1432 ; than one page. 1433 mov $NNN, %rax 1434 call ___chkstk_ms/___chkstk 1435 sub %rax, %rsp 1436 [else] 1437 sub $NNN, %rsp 1438 1439 [if needs FP] 1440 .seh_stackalloc (size of XMM spill slots) 1441 .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots 1442 [else] 1443 .seh_stackalloc NNN 1444 1445 ; Spill XMMs 1446 ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved, 1447 ; they may get spilled on any platform, if the current function 1448 ; calls @llvm.eh.unwind.init 1449 [if needs FP] 1450 [for all callee-saved XMM registers] 1451 movaps %<xmm reg>, -MMM(%rbp) 1452 [for all callee-saved XMM registers] 1453 .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset) 1454 ; i.e. the offset relative to (%rbp - SEHFrameOffset) 1455 [else] 1456 [for all callee-saved XMM registers] 1457 movaps %<xmm reg>, KKK(%rsp) 1458 [for all callee-saved XMM registers] 1459 .seh_savexmm %<xmm reg>, KKK 1460 1461 .seh_endprologue 1462 1463 [if needs base pointer] 1464 mov %rsp, %rbx 1465 [if needs to restore base pointer] 1466 mov %rsp, -MMM(%rbp) 1467 1468 ; Emit CFI info 1469 [if needs FP] 1470 [for all callee-saved registers] 1471 .cfi_offset %<reg>, (offset from %rbp) 1472 [else] 1473 .cfi_def_cfa_offset (offset from RETADDR) 1474 [for all callee-saved registers] 1475 .cfi_offset %<reg>, (offset from %rsp) 1476 1477 Notes: 1478 - .seh directives are emitted only for Windows 64 ABI 1479 - .cv_fpo directives are emitted on win32 when emitting CodeView 1480 - .cfi directives are emitted for all other ABIs 1481 - for 32-bit code, substitute %e?? registers for %r?? 1482 */ 1483 1484 void X86FrameLowering::emitPrologue(MachineFunction &MF, 1485 MachineBasicBlock &MBB) const { 1486 assert(&STI == &MF.getSubtarget<X86Subtarget>() && 1487 "MF used frame lowering for wrong subtarget"); 1488 MachineBasicBlock::iterator MBBI = MBB.begin(); 1489 MachineFrameInfo &MFI = MF.getFrameInfo(); 1490 const Function &Fn = MF.getFunction(); 1491 MachineModuleInfo &MMI = MF.getMMI(); 1492 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 1493 uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment. 1494 uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate. 1495 bool IsFunclet = MBB.isEHFuncletEntry(); 1496 EHPersonality Personality = EHPersonality::Unknown; 1497 if (Fn.hasPersonalityFn()) 1498 Personality = classifyEHPersonality(Fn.getPersonalityFn()); 1499 bool FnHasClrFunclet = 1500 MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR; 1501 bool IsClrFunclet = IsFunclet && FnHasClrFunclet; 1502 bool HasFP = hasFP(MF); 1503 bool IsWin64Prologue = isWin64Prologue(MF); 1504 bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry(); 1505 // FIXME: Emit FPO data for EH funclets. 1506 bool NeedsWinFPO = 1507 !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag(); 1508 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO; 1509 bool NeedsDwarfCFI = needsDwarfCFI(MF); 1510 Register FramePtr = TRI->getFrameRegister(MF); 1511 const Register MachineFramePtr = 1512 STI.isTarget64BitILP32() 1513 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr; 1514 Register BasePtr = TRI->getBaseRegister(); 1515 bool HasWinCFI = false; 1516 1517 // Debug location must be unknown since the first debug location is used 1518 // to determine the end of the prologue. 1519 DebugLoc DL; 1520 1521 // Space reserved for stack-based arguments when making a (ABI-guaranteed) 1522 // tail call. 1523 unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta(); 1524 if (TailCallArgReserveSize && IsWin64Prologue) 1525 report_fatal_error("Can't handle guaranteed tail call under win64 yet"); 1526 1527 const bool EmitStackProbeCall = 1528 STI.getTargetLowering()->hasStackProbeSymbol(MF); 1529 unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF); 1530 1531 if (HasFP && X86FI->hasSwiftAsyncContext()) { 1532 switch (MF.getTarget().Options.SwiftAsyncFramePointer) { 1533 case SwiftAsyncFramePointerMode::DeploymentBased: 1534 if (STI.swiftAsyncContextIsDynamicallySet()) { 1535 // The special symbol below is absolute and has a *value* suitable to be 1536 // combined with the frame pointer directly. 1537 BuildMI(MBB, MBBI, DL, TII.get(X86::OR64rm), MachineFramePtr) 1538 .addUse(MachineFramePtr) 1539 .addUse(X86::RIP) 1540 .addImm(1) 1541 .addUse(X86::NoRegister) 1542 .addExternalSymbol("swift_async_extendedFramePointerFlags", 1543 X86II::MO_GOTPCREL) 1544 .addUse(X86::NoRegister); 1545 break; 1546 } 1547 LLVM_FALLTHROUGH; 1548 1549 case SwiftAsyncFramePointerMode::Always: 1550 BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr) 1551 .addUse(MachineFramePtr) 1552 .addImm(60) 1553 .setMIFlag(MachineInstr::FrameSetup); 1554 break; 1555 1556 case SwiftAsyncFramePointerMode::Never: 1557 break; 1558 } 1559 } 1560 1561 // Re-align the stack on 64-bit if the x86-interrupt calling convention is 1562 // used and an error code was pushed, since the x86-64 ABI requires a 16-byte 1563 // stack alignment. 1564 if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit && 1565 Fn.arg_size() == 2) { 1566 StackSize += 8; 1567 MFI.setStackSize(StackSize); 1568 emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false); 1569 } 1570 1571 // If this is x86-64 and the Red Zone is not disabled, if we are a leaf 1572 // function, and use up to 128 bytes of stack space, don't have a frame 1573 // pointer, calls, or dynamic alloca then we do not need to adjust the 1574 // stack pointer (we fit in the Red Zone). We also check that we don't 1575 // push and pop from the stack. 1576 if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) && 1577 !MFI.hasVarSizedObjects() && // No dynamic alloca. 1578 !MFI.adjustsStack() && // No calls. 1579 !EmitStackProbeCall && // No stack probes. 1580 !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop. 1581 !MF.shouldSplitStack()) { // Regular stack 1582 uint64_t MinSize = 1583 X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta(); 1584 if (HasFP) MinSize += SlotSize; 1585 X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0); 1586 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0); 1587 MFI.setStackSize(StackSize); 1588 } 1589 1590 // Insert stack pointer adjustment for later moving of return addr. Only 1591 // applies to tail call optimized functions where the callee argument stack 1592 // size is bigger than the callers. 1593 if (TailCallArgReserveSize != 0) { 1594 BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize, 1595 /*InEpilogue=*/false) 1596 .setMIFlag(MachineInstr::FrameSetup); 1597 } 1598 1599 // Mapping for machine moves: 1600 // 1601 // DST: VirtualFP AND 1602 // SRC: VirtualFP => DW_CFA_def_cfa_offset 1603 // ELSE => DW_CFA_def_cfa 1604 // 1605 // SRC: VirtualFP AND 1606 // DST: Register => DW_CFA_def_cfa_register 1607 // 1608 // ELSE 1609 // OFFSET < 0 => DW_CFA_offset_extended_sf 1610 // REG < 64 => DW_CFA_offset + Reg 1611 // ELSE => DW_CFA_offset_extended 1612 1613 uint64_t NumBytes = 0; 1614 int stackGrowth = -SlotSize; 1615 1616 // Find the funclet establisher parameter 1617 Register Establisher = X86::NoRegister; 1618 if (IsClrFunclet) 1619 Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX; 1620 else if (IsFunclet) 1621 Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX; 1622 1623 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) { 1624 // Immediately spill establisher into the home slot. 1625 // The runtime cares about this. 1626 // MOV64mr %rdx, 16(%rsp) 1627 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; 1628 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16) 1629 .addReg(Establisher) 1630 .setMIFlag(MachineInstr::FrameSetup); 1631 MBB.addLiveIn(Establisher); 1632 } 1633 1634 if (HasFP) { 1635 assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved"); 1636 1637 // Calculate required stack adjustment. 1638 uint64_t FrameSize = StackSize - SlotSize; 1639 // If required, include space for extra hidden slot for stashing base pointer. 1640 if (X86FI->getRestoreBasePointer()) 1641 FrameSize += SlotSize; 1642 1643 NumBytes = FrameSize - 1644 (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize); 1645 1646 // Callee-saved registers are pushed on stack before the stack is realigned. 1647 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue) 1648 NumBytes = alignTo(NumBytes, MaxAlign); 1649 1650 // Save EBP/RBP into the appropriate stack slot. 1651 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r)) 1652 .addReg(MachineFramePtr, RegState::Kill) 1653 .setMIFlag(MachineInstr::FrameSetup); 1654 1655 if (NeedsDwarfCFI) { 1656 // Mark the place where EBP/RBP was saved. 1657 // Define the current CFA rule to use the provided offset. 1658 assert(StackSize); 1659 BuildCFI(MBB, MBBI, DL, 1660 MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth)); 1661 1662 // Change the rule for the FramePtr to be an "offset" rule. 1663 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); 1664 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset( 1665 nullptr, DwarfFramePtr, 2 * stackGrowth)); 1666 } 1667 1668 if (NeedsWinCFI) { 1669 HasWinCFI = true; 1670 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)) 1671 .addImm(FramePtr) 1672 .setMIFlag(MachineInstr::FrameSetup); 1673 } 1674 1675 if (!IsFunclet) { 1676 if (X86FI->hasSwiftAsyncContext()) { 1677 const auto &Attrs = MF.getFunction().getAttributes(); 1678 1679 // Before we update the live frame pointer we have to ensure there's a 1680 // valid (or null) asynchronous context in its slot just before FP in 1681 // the frame record, so store it now. 1682 if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) { 1683 // We have an initial context in r14, store it just before the frame 1684 // pointer. 1685 MBB.addLiveIn(X86::R14); 1686 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r)) 1687 .addReg(X86::R14) 1688 .setMIFlag(MachineInstr::FrameSetup); 1689 } else { 1690 // No initial context, store null so that there's no pointer that 1691 // could be misused. 1692 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8)) 1693 .addImm(0) 1694 .setMIFlag(MachineInstr::FrameSetup); 1695 } 1696 1697 if (NeedsWinCFI) { 1698 HasWinCFI = true; 1699 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)) 1700 .addImm(X86::R14) 1701 .setMIFlag(MachineInstr::FrameSetup); 1702 } 1703 1704 BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr) 1705 .addUse(X86::RSP) 1706 .addImm(1) 1707 .addUse(X86::NoRegister) 1708 .addImm(8) 1709 .addUse(X86::NoRegister) 1710 .setMIFlag(MachineInstr::FrameSetup); 1711 BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP) 1712 .addUse(X86::RSP) 1713 .addImm(8) 1714 .setMIFlag(MachineInstr::FrameSetup); 1715 } 1716 1717 if (!IsWin64Prologue && !IsFunclet) { 1718 // Update EBP with the new base value. 1719 if (!X86FI->hasSwiftAsyncContext()) 1720 BuildMI(MBB, MBBI, DL, 1721 TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), 1722 FramePtr) 1723 .addReg(StackPtr) 1724 .setMIFlag(MachineInstr::FrameSetup); 1725 1726 if (NeedsDwarfCFI) { 1727 // Mark effective beginning of when frame pointer becomes valid. 1728 // Define the current CFA to use the EBP/RBP register. 1729 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); 1730 BuildCFI( 1731 MBB, MBBI, DL, 1732 MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr)); 1733 } 1734 1735 if (NeedsWinFPO) { 1736 // .cv_fpo_setframe $FramePtr 1737 HasWinCFI = true; 1738 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame)) 1739 .addImm(FramePtr) 1740 .addImm(0) 1741 .setMIFlag(MachineInstr::FrameSetup); 1742 } 1743 } 1744 } 1745 } else { 1746 assert(!IsFunclet && "funclets without FPs not yet implemented"); 1747 NumBytes = StackSize - 1748 (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize); 1749 } 1750 1751 // Update the offset adjustment, which is mainly used by codeview to translate 1752 // from ESP to VFRAME relative local variable offsets. 1753 if (!IsFunclet) { 1754 if (HasFP && TRI->hasStackRealignment(MF)) 1755 MFI.setOffsetAdjustment(-NumBytes); 1756 else 1757 MFI.setOffsetAdjustment(-StackSize); 1758 } 1759 1760 // For EH funclets, only allocate enough space for outgoing calls. Save the 1761 // NumBytes value that we would've used for the parent frame. 1762 unsigned ParentFrameNumBytes = NumBytes; 1763 if (IsFunclet) 1764 NumBytes = getWinEHFuncletFrameSize(MF); 1765 1766 // Skip the callee-saved push instructions. 1767 bool PushedRegs = false; 1768 int StackOffset = 2 * stackGrowth; 1769 1770 while (MBBI != MBB.end() && 1771 MBBI->getFlag(MachineInstr::FrameSetup) && 1772 (MBBI->getOpcode() == X86::PUSH32r || 1773 MBBI->getOpcode() == X86::PUSH64r)) { 1774 PushedRegs = true; 1775 Register Reg = MBBI->getOperand(0).getReg(); 1776 ++MBBI; 1777 1778 if (!HasFP && NeedsDwarfCFI) { 1779 // Mark callee-saved push instruction. 1780 // Define the current CFA rule to use the provided offset. 1781 assert(StackSize); 1782 BuildCFI(MBB, MBBI, DL, 1783 MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset)); 1784 StackOffset += stackGrowth; 1785 } 1786 1787 if (NeedsWinCFI) { 1788 HasWinCFI = true; 1789 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)) 1790 .addImm(Reg) 1791 .setMIFlag(MachineInstr::FrameSetup); 1792 } 1793 } 1794 1795 // Realign stack after we pushed callee-saved registers (so that we'll be 1796 // able to calculate their offsets from the frame pointer). 1797 // Don't do this for Win64, it needs to realign the stack after the prologue. 1798 if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) { 1799 assert(HasFP && "There should be a frame pointer if stack is realigned."); 1800 BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign); 1801 1802 if (NeedsWinCFI) { 1803 HasWinCFI = true; 1804 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign)) 1805 .addImm(MaxAlign) 1806 .setMIFlag(MachineInstr::FrameSetup); 1807 } 1808 } 1809 1810 // If there is an SUB32ri of ESP immediately before this instruction, merge 1811 // the two. This can be the case when tail call elimination is enabled and 1812 // the callee has more arguments then the caller. 1813 NumBytes -= mergeSPUpdates(MBB, MBBI, true); 1814 1815 // Adjust stack pointer: ESP -= numbytes. 1816 1817 // Windows and cygwin/mingw require a prologue helper routine when allocating 1818 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw 1819 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the 1820 // stack and adjust the stack pointer in one go. The 64-bit version of 1821 // __chkstk is only responsible for probing the stack. The 64-bit prologue is 1822 // responsible for adjusting the stack pointer. Touching the stack at 4K 1823 // increments is necessary to ensure that the guard pages used by the OS 1824 // virtual memory manager are allocated in correct sequence. 1825 uint64_t AlignedNumBytes = NumBytes; 1826 if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) 1827 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign); 1828 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) { 1829 assert(!X86FI->getUsesRedZone() && 1830 "The Red Zone is not accounted for in stack probes"); 1831 1832 // Check whether EAX is livein for this block. 1833 bool isEAXAlive = isEAXLiveIn(MBB); 1834 1835 if (isEAXAlive) { 1836 if (Is64Bit) { 1837 // Save RAX 1838 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r)) 1839 .addReg(X86::RAX, RegState::Kill) 1840 .setMIFlag(MachineInstr::FrameSetup); 1841 } else { 1842 // Save EAX 1843 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r)) 1844 .addReg(X86::EAX, RegState::Kill) 1845 .setMIFlag(MachineInstr::FrameSetup); 1846 } 1847 } 1848 1849 if (Is64Bit) { 1850 // Handle the 64-bit Windows ABI case where we need to call __chkstk. 1851 // Function prologue is responsible for adjusting the stack pointer. 1852 int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes; 1853 BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Alloc)), X86::RAX) 1854 .addImm(Alloc) 1855 .setMIFlag(MachineInstr::FrameSetup); 1856 } else { 1857 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive. 1858 // We'll also use 4 already allocated bytes for EAX. 1859 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) 1860 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes) 1861 .setMIFlag(MachineInstr::FrameSetup); 1862 } 1863 1864 // Call __chkstk, __chkstk_ms, or __alloca. 1865 emitStackProbe(MF, MBB, MBBI, DL, true); 1866 1867 if (isEAXAlive) { 1868 // Restore RAX/EAX 1869 MachineInstr *MI; 1870 if (Is64Bit) 1871 MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX), 1872 StackPtr, false, NumBytes - 8); 1873 else 1874 MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX), 1875 StackPtr, false, NumBytes - 4); 1876 MI->setFlag(MachineInstr::FrameSetup); 1877 MBB.insert(MBBI, MI); 1878 } 1879 } else if (NumBytes) { 1880 emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false); 1881 } 1882 1883 if (NeedsWinCFI && NumBytes) { 1884 HasWinCFI = true; 1885 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc)) 1886 .addImm(NumBytes) 1887 .setMIFlag(MachineInstr::FrameSetup); 1888 } 1889 1890 int SEHFrameOffset = 0; 1891 unsigned SPOrEstablisher; 1892 if (IsFunclet) { 1893 if (IsClrFunclet) { 1894 // The establisher parameter passed to a CLR funclet is actually a pointer 1895 // to the (mostly empty) frame of its nearest enclosing funclet; we have 1896 // to find the root function establisher frame by loading the PSPSym from 1897 // the intermediate frame. 1898 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF); 1899 MachinePointerInfo NoInfo; 1900 MBB.addLiveIn(Establisher); 1901 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher), 1902 Establisher, false, PSPSlotOffset) 1903 .addMemOperand(MF.getMachineMemOperand( 1904 NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize))); 1905 ; 1906 // Save the root establisher back into the current funclet's (mostly 1907 // empty) frame, in case a sub-funclet or the GC needs it. 1908 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, 1909 false, PSPSlotOffset) 1910 .addReg(Establisher) 1911 .addMemOperand(MF.getMachineMemOperand( 1912 NoInfo, 1913 MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, 1914 SlotSize, Align(SlotSize))); 1915 } 1916 SPOrEstablisher = Establisher; 1917 } else { 1918 SPOrEstablisher = StackPtr; 1919 } 1920 1921 if (IsWin64Prologue && HasFP) { 1922 // Set RBP to a small fixed offset from RSP. In the funclet case, we base 1923 // this calculation on the incoming establisher, which holds the value of 1924 // RSP from the parent frame at the end of the prologue. 1925 SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes); 1926 if (SEHFrameOffset) 1927 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr), 1928 SPOrEstablisher, false, SEHFrameOffset); 1929 else 1930 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr) 1931 .addReg(SPOrEstablisher); 1932 1933 // If this is not a funclet, emit the CFI describing our frame pointer. 1934 if (NeedsWinCFI && !IsFunclet) { 1935 assert(!NeedsWinFPO && "this setframe incompatible with FPO data"); 1936 HasWinCFI = true; 1937 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame)) 1938 .addImm(FramePtr) 1939 .addImm(SEHFrameOffset) 1940 .setMIFlag(MachineInstr::FrameSetup); 1941 if (isAsynchronousEHPersonality(Personality)) 1942 MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset; 1943 } 1944 } else if (IsFunclet && STI.is32Bit()) { 1945 // Reset EBP / ESI to something good for funclets. 1946 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL); 1947 // If we're a catch funclet, we can be returned to via catchret. Save ESP 1948 // into the registration node so that the runtime will restore it for us. 1949 if (!MBB.isCleanupFuncletEntry()) { 1950 assert(Personality == EHPersonality::MSVC_CXX); 1951 Register FrameReg; 1952 int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex; 1953 int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed(); 1954 // ESP is the first field, so no extra displacement is needed. 1955 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg, 1956 false, EHRegOffset) 1957 .addReg(X86::ESP); 1958 } 1959 } 1960 1961 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) { 1962 const MachineInstr &FrameInstr = *MBBI; 1963 ++MBBI; 1964 1965 if (NeedsWinCFI) { 1966 int FI; 1967 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) { 1968 if (X86::FR64RegClass.contains(Reg)) { 1969 int Offset; 1970 Register IgnoredFrameReg; 1971 if (IsWin64Prologue && IsFunclet) 1972 Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg); 1973 else 1974 Offset = 1975 getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() + 1976 SEHFrameOffset; 1977 1978 HasWinCFI = true; 1979 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data"); 1980 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM)) 1981 .addImm(Reg) 1982 .addImm(Offset) 1983 .setMIFlag(MachineInstr::FrameSetup); 1984 } 1985 } 1986 } 1987 } 1988 1989 if (NeedsWinCFI && HasWinCFI) 1990 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue)) 1991 .setMIFlag(MachineInstr::FrameSetup); 1992 1993 if (FnHasClrFunclet && !IsFunclet) { 1994 // Save the so-called Initial-SP (i.e. the value of the stack pointer 1995 // immediately after the prolog) into the PSPSlot so that funclets 1996 // and the GC can recover it. 1997 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF); 1998 auto PSPInfo = MachinePointerInfo::getFixedStack( 1999 MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx); 2000 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false, 2001 PSPSlotOffset) 2002 .addReg(StackPtr) 2003 .addMemOperand(MF.getMachineMemOperand( 2004 PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, 2005 SlotSize, Align(SlotSize))); 2006 } 2007 2008 // Realign stack after we spilled callee-saved registers (so that we'll be 2009 // able to calculate their offsets from the frame pointer). 2010 // Win64 requires aligning the stack after the prologue. 2011 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) { 2012 assert(HasFP && "There should be a frame pointer if stack is realigned."); 2013 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign); 2014 } 2015 2016 // We already dealt with stack realignment and funclets above. 2017 if (IsFunclet && STI.is32Bit()) 2018 return; 2019 2020 // If we need a base pointer, set it up here. It's whatever the value 2021 // of the stack pointer is at this point. Any variable size objects 2022 // will be allocated after this, so we can still use the base pointer 2023 // to reference locals. 2024 if (TRI->hasBasePointer(MF)) { 2025 // Update the base pointer with the current stack pointer. 2026 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr; 2027 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr) 2028 .addReg(SPOrEstablisher) 2029 .setMIFlag(MachineInstr::FrameSetup); 2030 if (X86FI->getRestoreBasePointer()) { 2031 // Stash value of base pointer. Saving RSP instead of EBP shortens 2032 // dependence chain. Used by SjLj EH. 2033 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; 2034 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), 2035 FramePtr, true, X86FI->getRestoreBasePointerOffset()) 2036 .addReg(SPOrEstablisher) 2037 .setMIFlag(MachineInstr::FrameSetup); 2038 } 2039 2040 if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) { 2041 // Stash the value of the frame pointer relative to the base pointer for 2042 // Win32 EH. This supports Win32 EH, which does the inverse of the above: 2043 // it recovers the frame pointer from the base pointer rather than the 2044 // other way around. 2045 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; 2046 Register UsedReg; 2047 int Offset = 2048 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg) 2049 .getFixed(); 2050 assert(UsedReg == BasePtr); 2051 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset) 2052 .addReg(FramePtr) 2053 .setMIFlag(MachineInstr::FrameSetup); 2054 } 2055 } 2056 2057 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) { 2058 // Mark end of stack pointer adjustment. 2059 if (!HasFP && NumBytes) { 2060 // Define the current CFA rule to use the provided offset. 2061 assert(StackSize); 2062 BuildCFI( 2063 MBB, MBBI, DL, 2064 MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth)); 2065 } 2066 2067 // Emit DWARF info specifying the offsets of the callee-saved registers. 2068 emitCalleeSavedFrameMoves(MBB, MBBI, DL, true); 2069 } 2070 2071 // X86 Interrupt handling function cannot assume anything about the direction 2072 // flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction 2073 // in each prologue of interrupt handler function. 2074 // 2075 // FIXME: Create "cld" instruction only in these cases: 2076 // 1. The interrupt handling function uses any of the "rep" instructions. 2077 // 2. Interrupt handling function calls another function. 2078 // 2079 if (Fn.getCallingConv() == CallingConv::X86_INTR) 2080 BuildMI(MBB, MBBI, DL, TII.get(X86::CLD)) 2081 .setMIFlag(MachineInstr::FrameSetup); 2082 2083 // At this point we know if the function has WinCFI or not. 2084 MF.setHasWinCFI(HasWinCFI); 2085 } 2086 2087 bool X86FrameLowering::canUseLEAForSPInEpilogue( 2088 const MachineFunction &MF) const { 2089 // We can't use LEA instructions for adjusting the stack pointer if we don't 2090 // have a frame pointer in the Win64 ABI. Only ADD instructions may be used 2091 // to deallocate the stack. 2092 // This means that we can use LEA for SP in two situations: 2093 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA. 2094 // 2. We *have* a frame pointer which means we are permitted to use LEA. 2095 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF); 2096 } 2097 2098 static bool isFuncletReturnInstr(MachineInstr &MI) { 2099 switch (MI.getOpcode()) { 2100 case X86::CATCHRET: 2101 case X86::CLEANUPRET: 2102 return true; 2103 default: 2104 return false; 2105 } 2106 llvm_unreachable("impossible"); 2107 } 2108 2109 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the 2110 // stack. It holds a pointer to the bottom of the root function frame. The 2111 // establisher frame pointer passed to a nested funclet may point to the 2112 // (mostly empty) frame of its parent funclet, but it will need to find 2113 // the frame of the root function to access locals. To facilitate this, 2114 // every funclet copies the pointer to the bottom of the root function 2115 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the 2116 // same offset for the PSPSym in the root function frame that's used in the 2117 // funclets' frames allows each funclet to dynamically accept any ancestor 2118 // frame as its establisher argument (the runtime doesn't guarantee the 2119 // immediate parent for some reason lost to history), and also allows the GC, 2120 // which uses the PSPSym for some bookkeeping, to find it in any funclet's 2121 // frame with only a single offset reported for the entire method. 2122 unsigned 2123 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const { 2124 const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo(); 2125 Register SPReg; 2126 int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg, 2127 /*IgnoreSPUpdates*/ true) 2128 .getFixed(); 2129 assert(Offset >= 0 && SPReg == TRI->getStackRegister()); 2130 return static_cast<unsigned>(Offset); 2131 } 2132 2133 unsigned 2134 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const { 2135 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2136 // This is the size of the pushed CSRs. 2137 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 2138 // This is the size of callee saved XMMs. 2139 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 2140 unsigned XMMSize = WinEHXMMSlotInfo.size() * 2141 TRI->getSpillSize(X86::VR128RegClass); 2142 // This is the amount of stack a funclet needs to allocate. 2143 unsigned UsedSize; 2144 EHPersonality Personality = 2145 classifyEHPersonality(MF.getFunction().getPersonalityFn()); 2146 if (Personality == EHPersonality::CoreCLR) { 2147 // CLR funclets need to hold enough space to include the PSPSym, at the 2148 // same offset from the stack pointer (immediately after the prolog) as it 2149 // resides at in the main function. 2150 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize; 2151 } else { 2152 // Other funclets just need enough stack for outgoing call arguments. 2153 UsedSize = MF.getFrameInfo().getMaxCallFrameSize(); 2154 } 2155 // RBP is not included in the callee saved register block. After pushing RBP, 2156 // everything is 16 byte aligned. Everything we allocate before an outgoing 2157 // call must also be 16 byte aligned. 2158 unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign()); 2159 // Subtract out the size of the callee saved registers. This is how much stack 2160 // each funclet will allocate. 2161 return FrameSizeMinusRBP + XMMSize - CSSize; 2162 } 2163 2164 static bool isTailCallOpcode(unsigned Opc) { 2165 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi || 2166 Opc == X86::TCRETURNmi || 2167 Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 || 2168 Opc == X86::TCRETURNmi64; 2169 } 2170 2171 void X86FrameLowering::emitEpilogue(MachineFunction &MF, 2172 MachineBasicBlock &MBB) const { 2173 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2174 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2175 MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator(); 2176 MachineBasicBlock::iterator MBBI = Terminator; 2177 DebugLoc DL; 2178 if (MBBI != MBB.end()) 2179 DL = MBBI->getDebugLoc(); 2180 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit. 2181 const bool Is64BitILP32 = STI.isTarget64BitILP32(); 2182 Register FramePtr = TRI->getFrameRegister(MF); 2183 Register MachineFramePtr = 2184 Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr; 2185 2186 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 2187 bool NeedsWin64CFI = 2188 IsWin64Prologue && MF.getFunction().needsUnwindTableEntry(); 2189 bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI); 2190 2191 // Get the number of bytes to allocate from the FrameInfo. 2192 uint64_t StackSize = MFI.getStackSize(); 2193 uint64_t MaxAlign = calculateMaxStackAlign(MF); 2194 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 2195 unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta(); 2196 bool HasFP = hasFP(MF); 2197 uint64_t NumBytes = 0; 2198 2199 bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() && 2200 !MF.getTarget().getTargetTriple().isOSWindows()) && 2201 MF.needsFrameMoves(); 2202 2203 if (IsFunclet) { 2204 assert(HasFP && "EH funclets without FP not yet implemented"); 2205 NumBytes = getWinEHFuncletFrameSize(MF); 2206 } else if (HasFP) { 2207 // Calculate required stack adjustment. 2208 uint64_t FrameSize = StackSize - SlotSize; 2209 NumBytes = FrameSize - CSSize - TailCallArgReserveSize; 2210 2211 // Callee-saved registers were pushed on stack before the stack was 2212 // realigned. 2213 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue) 2214 NumBytes = alignTo(FrameSize, MaxAlign); 2215 } else { 2216 NumBytes = StackSize - CSSize - TailCallArgReserveSize; 2217 } 2218 uint64_t SEHStackAllocAmt = NumBytes; 2219 2220 // AfterPop is the position to insert .cfi_restore. 2221 MachineBasicBlock::iterator AfterPop = MBBI; 2222 if (HasFP) { 2223 if (X86FI->hasSwiftAsyncContext()) { 2224 // Discard the context. 2225 int Offset = 16 + mergeSPUpdates(MBB, MBBI, true); 2226 emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/true); 2227 } 2228 // Pop EBP. 2229 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), 2230 MachineFramePtr) 2231 .setMIFlag(MachineInstr::FrameDestroy); 2232 2233 // We need to reset FP to its untagged state on return. Bit 60 is currently 2234 // used to show the presence of an extended frame. 2235 if (X86FI->hasSwiftAsyncContext()) { 2236 BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), 2237 MachineFramePtr) 2238 .addUse(MachineFramePtr) 2239 .addImm(60) 2240 .setMIFlag(MachineInstr::FrameDestroy); 2241 } 2242 2243 if (NeedsDwarfCFI) { 2244 unsigned DwarfStackPtr = 2245 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true); 2246 BuildCFI(MBB, MBBI, DL, 2247 MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize)); 2248 if (!MBB.succ_empty() && !MBB.isReturnBlock()) { 2249 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); 2250 BuildCFI(MBB, AfterPop, DL, 2251 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr)); 2252 --MBBI; 2253 --AfterPop; 2254 } 2255 --MBBI; 2256 } 2257 } 2258 2259 MachineBasicBlock::iterator FirstCSPop = MBBI; 2260 // Skip the callee-saved pop instructions. 2261 while (MBBI != MBB.begin()) { 2262 MachineBasicBlock::iterator PI = std::prev(MBBI); 2263 unsigned Opc = PI->getOpcode(); 2264 2265 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) { 2266 if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) && 2267 (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) && 2268 (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) && 2269 (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy))) 2270 break; 2271 FirstCSPop = PI; 2272 } 2273 2274 --MBBI; 2275 } 2276 MBBI = FirstCSPop; 2277 2278 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET) 2279 emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator); 2280 2281 if (MBBI != MBB.end()) 2282 DL = MBBI->getDebugLoc(); 2283 // If there is an ADD32ri or SUB32ri of ESP immediately before this 2284 // instruction, merge the two instructions. 2285 if (NumBytes || MFI.hasVarSizedObjects()) 2286 NumBytes += mergeSPUpdates(MBB, MBBI, true); 2287 2288 // If dynamic alloca is used, then reset esp to point to the last callee-saved 2289 // slot before popping them off! Same applies for the case, when stack was 2290 // realigned. Don't do this if this was a funclet epilogue, since the funclets 2291 // will not do realignment or dynamic stack allocation. 2292 if (((TRI->hasStackRealignment(MF)) || MFI.hasVarSizedObjects()) && 2293 !IsFunclet) { 2294 if (TRI->hasStackRealignment(MF)) 2295 MBBI = FirstCSPop; 2296 unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt); 2297 uint64_t LEAAmount = 2298 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize; 2299 2300 if (X86FI->hasSwiftAsyncContext()) 2301 LEAAmount -= 16; 2302 2303 // There are only two legal forms of epilogue: 2304 // - add SEHAllocationSize, %rsp 2305 // - lea SEHAllocationSize(%FramePtr), %rsp 2306 // 2307 // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence. 2308 // However, we may use this sequence if we have a frame pointer because the 2309 // effects of the prologue can safely be undone. 2310 if (LEAAmount != 0) { 2311 unsigned Opc = getLEArOpcode(Uses64BitFramePtr); 2312 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), 2313 FramePtr, false, LEAAmount); 2314 --MBBI; 2315 } else { 2316 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr); 2317 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) 2318 .addReg(FramePtr); 2319 --MBBI; 2320 } 2321 } else if (NumBytes) { 2322 // Adjust stack pointer back: ESP += numbytes. 2323 emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true); 2324 if (!HasFP && NeedsDwarfCFI) { 2325 // Define the current CFA rule to use the provided offset. 2326 BuildCFI(MBB, MBBI, DL, 2327 MCCFIInstruction::cfiDefCfaOffset( 2328 nullptr, CSSize + TailCallArgReserveSize + SlotSize)); 2329 } 2330 --MBBI; 2331 } 2332 2333 // Windows unwinder will not invoke function's exception handler if IP is 2334 // either in prologue or in epilogue. This behavior causes a problem when a 2335 // call immediately precedes an epilogue, because the return address points 2336 // into the epilogue. To cope with that, we insert an epilogue marker here, 2337 // then replace it with a 'nop' if it ends up immediately after a CALL in the 2338 // final emitted code. 2339 if (NeedsWin64CFI && MF.hasWinCFI()) 2340 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue)); 2341 2342 if (!HasFP && NeedsDwarfCFI) { 2343 MBBI = FirstCSPop; 2344 int64_t Offset = -CSSize - SlotSize; 2345 // Mark callee-saved pop instruction. 2346 // Define the current CFA rule to use the provided offset. 2347 while (MBBI != MBB.end()) { 2348 MachineBasicBlock::iterator PI = MBBI; 2349 unsigned Opc = PI->getOpcode(); 2350 ++MBBI; 2351 if (Opc == X86::POP32r || Opc == X86::POP64r) { 2352 Offset += SlotSize; 2353 BuildCFI(MBB, MBBI, DL, 2354 MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset)); 2355 } 2356 } 2357 } 2358 2359 // Emit DWARF info specifying the restores of the callee-saved registers. 2360 // For epilogue with return inside or being other block without successor, 2361 // no need to generate .cfi_restore for callee-saved registers. 2362 if (NeedsDwarfCFI && !MBB.succ_empty()) 2363 emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false); 2364 2365 if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) { 2366 // Add the return addr area delta back since we are not tail calling. 2367 int Offset = -1 * X86FI->getTCReturnAddrDelta(); 2368 assert(Offset >= 0 && "TCDelta should never be positive"); 2369 if (Offset) { 2370 // Check for possible merge with preceding ADD instruction. 2371 Offset += mergeSPUpdates(MBB, Terminator, true); 2372 emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true); 2373 } 2374 } 2375 2376 // Emit tilerelease for AMX kernel. 2377 if (X86FI->hasVirtualTileReg()) 2378 BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE)); 2379 } 2380 2381 StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, 2382 int FI, 2383 Register &FrameReg) const { 2384 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2385 2386 bool IsFixed = MFI.isFixedObjectIndex(FI); 2387 // We can't calculate offset from frame pointer if the stack is realigned, 2388 // so enforce usage of stack/base pointer. The base pointer is used when we 2389 // have dynamic allocas in addition to dynamic realignment. 2390 if (TRI->hasBasePointer(MF)) 2391 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister(); 2392 else if (TRI->hasStackRealignment(MF)) 2393 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister(); 2394 else 2395 FrameReg = TRI->getFrameRegister(MF); 2396 2397 // Offset will hold the offset from the stack pointer at function entry to the 2398 // object. 2399 // We need to factor in additional offsets applied during the prologue to the 2400 // frame, base, and stack pointer depending on which is used. 2401 int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea(); 2402 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2403 unsigned CSSize = X86FI->getCalleeSavedFrameSize(); 2404 uint64_t StackSize = MFI.getStackSize(); 2405 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 2406 int64_t FPDelta = 0; 2407 2408 // In an x86 interrupt, remove the offset we added to account for the return 2409 // address from any stack object allocated in the caller's frame. Interrupts 2410 // do not have a standard return address. Fixed objects in the current frame, 2411 // such as SSE register spills, should not get this treatment. 2412 if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR && 2413 Offset >= 0) { 2414 Offset += getOffsetOfLocalArea(); 2415 } 2416 2417 if (IsWin64Prologue) { 2418 assert(!MFI.hasCalls() || (StackSize % 16) == 8); 2419 2420 // Calculate required stack adjustment. 2421 uint64_t FrameSize = StackSize - SlotSize; 2422 // If required, include space for extra hidden slot for stashing base pointer. 2423 if (X86FI->getRestoreBasePointer()) 2424 FrameSize += SlotSize; 2425 uint64_t NumBytes = FrameSize - CSSize; 2426 2427 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes); 2428 if (FI && FI == X86FI->getFAIndex()) 2429 return StackOffset::getFixed(-SEHFrameOffset); 2430 2431 // FPDelta is the offset from the "traditional" FP location of the old base 2432 // pointer followed by return address and the location required by the 2433 // restricted Win64 prologue. 2434 // Add FPDelta to all offsets below that go through the frame pointer. 2435 FPDelta = FrameSize - SEHFrameOffset; 2436 assert((!MFI.hasCalls() || (FPDelta % 16) == 0) && 2437 "FPDelta isn't aligned per the Win64 ABI!"); 2438 } 2439 2440 if (FrameReg == TRI->getFramePtr()) { 2441 // Skip saved EBP/RBP 2442 Offset += SlotSize; 2443 2444 // Account for restricted Windows prologue. 2445 Offset += FPDelta; 2446 2447 // Skip the RETADDR move area 2448 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 2449 if (TailCallReturnAddrDelta < 0) 2450 Offset -= TailCallReturnAddrDelta; 2451 2452 return StackOffset::getFixed(Offset); 2453 } 2454 2455 // FrameReg is either the stack pointer or a base pointer. But the base is 2456 // located at the end of the statically known StackSize so the distinction 2457 // doesn't really matter. 2458 if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF)) 2459 assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize))); 2460 return StackOffset::getFixed(Offset + StackSize); 2461 } 2462 2463 int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, 2464 Register &FrameReg) const { 2465 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2466 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2467 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 2468 const auto it = WinEHXMMSlotInfo.find(FI); 2469 2470 if (it == WinEHXMMSlotInfo.end()) 2471 return getFrameIndexReference(MF, FI, FrameReg).getFixed(); 2472 2473 FrameReg = TRI->getStackRegister(); 2474 return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) + 2475 it->second; 2476 } 2477 2478 StackOffset 2479 X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI, 2480 Register &FrameReg, 2481 int Adjustment) const { 2482 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2483 FrameReg = TRI->getStackRegister(); 2484 return StackOffset::getFixed(MFI.getObjectOffset(FI) - 2485 getOffsetOfLocalArea() + Adjustment); 2486 } 2487 2488 StackOffset 2489 X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF, 2490 int FI, Register &FrameReg, 2491 bool IgnoreSPUpdates) const { 2492 2493 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2494 // Does not include any dynamic realign. 2495 const uint64_t StackSize = MFI.getStackSize(); 2496 // LLVM arranges the stack as follows: 2497 // ... 2498 // ARG2 2499 // ARG1 2500 // RETADDR 2501 // PUSH RBP <-- RBP points here 2502 // PUSH CSRs 2503 // ~~~~~~~ <-- possible stack realignment (non-win64) 2504 // ... 2505 // STACK OBJECTS 2506 // ... <-- RSP after prologue points here 2507 // ~~~~~~~ <-- possible stack realignment (win64) 2508 // 2509 // if (hasVarSizedObjects()): 2510 // ... <-- "base pointer" (ESI/RBX) points here 2511 // DYNAMIC ALLOCAS 2512 // ... <-- RSP points here 2513 // 2514 // Case 1: In the simple case of no stack realignment and no dynamic 2515 // allocas, both "fixed" stack objects (arguments and CSRs) are addressable 2516 // with fixed offsets from RSP. 2517 // 2518 // Case 2: In the case of stack realignment with no dynamic allocas, fixed 2519 // stack objects are addressed with RBP and regular stack objects with RSP. 2520 // 2521 // Case 3: In the case of dynamic allocas and stack realignment, RSP is used 2522 // to address stack arguments for outgoing calls and nothing else. The "base 2523 // pointer" points to local variables, and RBP points to fixed objects. 2524 // 2525 // In cases 2 and 3, we can only answer for non-fixed stack objects, and the 2526 // answer we give is relative to the SP after the prologue, and not the 2527 // SP in the middle of the function. 2528 2529 if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) && 2530 !STI.isTargetWin64()) 2531 return getFrameIndexReference(MF, FI, FrameReg); 2532 2533 // If !hasReservedCallFrame the function might have SP adjustement in the 2534 // body. So, even though the offset is statically known, it depends on where 2535 // we are in the function. 2536 if (!IgnoreSPUpdates && !hasReservedCallFrame(MF)) 2537 return getFrameIndexReference(MF, FI, FrameReg); 2538 2539 // We don't handle tail calls, and shouldn't be seeing them either. 2540 assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 && 2541 "we don't handle this case!"); 2542 2543 // This is how the math works out: 2544 // 2545 // %rsp grows (i.e. gets lower) left to right. Each box below is 2546 // one word (eight bytes). Obj0 is the stack slot we're trying to 2547 // get to. 2548 // 2549 // ---------------------------------- 2550 // | BP | Obj0 | Obj1 | ... | ObjN | 2551 // ---------------------------------- 2552 // ^ ^ ^ ^ 2553 // A B C E 2554 // 2555 // A is the incoming stack pointer. 2556 // (B - A) is the local area offset (-8 for x86-64) [1] 2557 // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2] 2558 // 2559 // |(E - B)| is the StackSize (absolute value, positive). For a 2560 // stack that grown down, this works out to be (B - E). [3] 2561 // 2562 // E is also the value of %rsp after stack has been set up, and we 2563 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now 2564 // (C - E) == (C - A) - (B - A) + (B - E) 2565 // { Using [1], [2] and [3] above } 2566 // == getObjectOffset - LocalAreaOffset + StackSize 2567 2568 return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize); 2569 } 2570 2571 bool X86FrameLowering::assignCalleeSavedSpillSlots( 2572 MachineFunction &MF, const TargetRegisterInfo *TRI, 2573 std::vector<CalleeSavedInfo> &CSI) const { 2574 MachineFrameInfo &MFI = MF.getFrameInfo(); 2575 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2576 2577 unsigned CalleeSavedFrameSize = 0; 2578 unsigned XMMCalleeSavedFrameSize = 0; 2579 auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); 2580 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta(); 2581 2582 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); 2583 2584 if (TailCallReturnAddrDelta < 0) { 2585 // create RETURNADDR area 2586 // arg 2587 // arg 2588 // RETADDR 2589 // { ... 2590 // RETADDR area 2591 // ... 2592 // } 2593 // [EBP] 2594 MFI.CreateFixedObject(-TailCallReturnAddrDelta, 2595 TailCallReturnAddrDelta - SlotSize, true); 2596 } 2597 2598 // Spill the BasePtr if it's used. 2599 if (this->TRI->hasBasePointer(MF)) { 2600 // Allocate a spill slot for EBP if we have a base pointer and EH funclets. 2601 if (MF.hasEHFunclets()) { 2602 int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize)); 2603 X86FI->setHasSEHFramePtrSave(true); 2604 X86FI->setSEHFramePtrSaveIndex(FI); 2605 } 2606 } 2607 2608 if (hasFP(MF)) { 2609 // emitPrologue always spills frame register the first thing. 2610 SpillSlotOffset -= SlotSize; 2611 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset); 2612 2613 // The async context lives directly before the frame pointer, and we 2614 // allocate a second slot to preserve stack alignment. 2615 if (X86FI->hasSwiftAsyncContext()) { 2616 SpillSlotOffset -= SlotSize; 2617 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset); 2618 SpillSlotOffset -= SlotSize; 2619 } 2620 2621 // Since emitPrologue and emitEpilogue will handle spilling and restoring of 2622 // the frame register, we can delete it from CSI list and not have to worry 2623 // about avoiding it later. 2624 Register FPReg = TRI->getFrameRegister(MF); 2625 for (unsigned i = 0; i < CSI.size(); ++i) { 2626 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) { 2627 CSI.erase(CSI.begin() + i); 2628 break; 2629 } 2630 } 2631 } 2632 2633 // Assign slots for GPRs. It increases frame size. 2634 for (CalleeSavedInfo &I : llvm::reverse(CSI)) { 2635 Register Reg = I.getReg(); 2636 2637 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg)) 2638 continue; 2639 2640 SpillSlotOffset -= SlotSize; 2641 CalleeSavedFrameSize += SlotSize; 2642 2643 int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset); 2644 I.setFrameIdx(SlotIndex); 2645 } 2646 2647 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize); 2648 MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize); 2649 2650 // Assign slots for XMMs. 2651 for (CalleeSavedInfo &I : llvm::reverse(CSI)) { 2652 Register Reg = I.getReg(); 2653 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg)) 2654 continue; 2655 2656 // If this is k-register make sure we lookup via the largest legal type. 2657 MVT VT = MVT::Other; 2658 if (X86::VK16RegClass.contains(Reg)) 2659 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; 2660 2661 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2662 unsigned Size = TRI->getSpillSize(*RC); 2663 Align Alignment = TRI->getSpillAlign(*RC); 2664 // ensure alignment 2665 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86"); 2666 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment); 2667 2668 // spill into slot 2669 SpillSlotOffset -= Size; 2670 int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset); 2671 I.setFrameIdx(SlotIndex); 2672 MFI.ensureMaxAlignment(Alignment); 2673 2674 // Save the start offset and size of XMM in stack frame for funclets. 2675 if (X86::VR128RegClass.contains(Reg)) { 2676 WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize; 2677 XMMCalleeSavedFrameSize += Size; 2678 } 2679 } 2680 2681 return true; 2682 } 2683 2684 bool X86FrameLowering::spillCalleeSavedRegisters( 2685 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 2686 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 2687 DebugLoc DL = MBB.findDebugLoc(MI); 2688 2689 // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI 2690 // for us, and there are no XMM CSRs on Win32. 2691 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows()) 2692 return true; 2693 2694 // Push GPRs. It increases frame size. 2695 const MachineFunction &MF = *MBB.getParent(); 2696 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r; 2697 for (const CalleeSavedInfo &I : llvm::reverse(CSI)) { 2698 Register Reg = I.getReg(); 2699 2700 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg)) 2701 continue; 2702 2703 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2704 bool isLiveIn = MRI.isLiveIn(Reg); 2705 if (!isLiveIn) 2706 MBB.addLiveIn(Reg); 2707 2708 // Decide whether we can add a kill flag to the use. 2709 bool CanKill = !isLiveIn; 2710 // Check if any subregister is live-in 2711 if (CanKill) { 2712 for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) { 2713 if (MRI.isLiveIn(*AReg)) { 2714 CanKill = false; 2715 break; 2716 } 2717 } 2718 } 2719 2720 // Do not set a kill flag on values that are also marked as live-in. This 2721 // happens with the @llvm-returnaddress intrinsic and with arguments 2722 // passed in callee saved registers. 2723 // Omitting the kill flags is conservatively correct even if the live-in 2724 // is not used after all. 2725 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill)) 2726 .setMIFlag(MachineInstr::FrameSetup); 2727 } 2728 2729 // Make XMM regs spilled. X86 does not have ability of push/pop XMM. 2730 // It can be done by spilling XMMs to stack frame. 2731 for (const CalleeSavedInfo &I : llvm::reverse(CSI)) { 2732 Register Reg = I.getReg(); 2733 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg)) 2734 continue; 2735 2736 // If this is k-register make sure we lookup via the largest legal type. 2737 MVT VT = MVT::Other; 2738 if (X86::VK16RegClass.contains(Reg)) 2739 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; 2740 2741 // Add the callee-saved register as live-in. It's killed at the spill. 2742 MBB.addLiveIn(Reg); 2743 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2744 2745 TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI); 2746 --MI; 2747 MI->setFlag(MachineInstr::FrameSetup); 2748 ++MI; 2749 } 2750 2751 return true; 2752 } 2753 2754 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB, 2755 MachineBasicBlock::iterator MBBI, 2756 MachineInstr *CatchRet) const { 2757 // SEH shouldn't use catchret. 2758 assert(!isAsynchronousEHPersonality(classifyEHPersonality( 2759 MBB.getParent()->getFunction().getPersonalityFn())) && 2760 "SEH should not use CATCHRET"); 2761 const DebugLoc &DL = CatchRet->getDebugLoc(); 2762 MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB(); 2763 2764 // Fill EAX/RAX with the address of the target block. 2765 if (STI.is64Bit()) { 2766 // LEA64r CatchRetTarget(%rip), %rax 2767 BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX) 2768 .addReg(X86::RIP) 2769 .addImm(0) 2770 .addReg(0) 2771 .addMBB(CatchRetTarget) 2772 .addReg(0); 2773 } else { 2774 // MOV32ri $CatchRetTarget, %eax 2775 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) 2776 .addMBB(CatchRetTarget); 2777 } 2778 2779 // Record that we've taken the address of CatchRetTarget and no longer just 2780 // reference it in a terminator. 2781 CatchRetTarget->setHasAddressTaken(); 2782 } 2783 2784 bool X86FrameLowering::restoreCalleeSavedRegisters( 2785 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 2786 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 2787 if (CSI.empty()) 2788 return false; 2789 2790 if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) { 2791 // Don't restore CSRs in 32-bit EH funclets. Matches 2792 // spillCalleeSavedRegisters. 2793 if (STI.is32Bit()) 2794 return true; 2795 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form 2796 // funclets. emitEpilogue transforms these to normal jumps. 2797 if (MI->getOpcode() == X86::CATCHRET) { 2798 const Function &F = MBB.getParent()->getFunction(); 2799 bool IsSEH = isAsynchronousEHPersonality( 2800 classifyEHPersonality(F.getPersonalityFn())); 2801 if (IsSEH) 2802 return true; 2803 } 2804 } 2805 2806 DebugLoc DL = MBB.findDebugLoc(MI); 2807 2808 // Reload XMMs from stack frame. 2809 for (const CalleeSavedInfo &I : CSI) { 2810 Register Reg = I.getReg(); 2811 if (X86::GR64RegClass.contains(Reg) || 2812 X86::GR32RegClass.contains(Reg)) 2813 continue; 2814 2815 // If this is k-register make sure we lookup via the largest legal type. 2816 MVT VT = MVT::Other; 2817 if (X86::VK16RegClass.contains(Reg)) 2818 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1; 2819 2820 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2821 TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI); 2822 } 2823 2824 // POP GPRs. 2825 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r; 2826 for (const CalleeSavedInfo &I : CSI) { 2827 Register Reg = I.getReg(); 2828 if (!X86::GR64RegClass.contains(Reg) && 2829 !X86::GR32RegClass.contains(Reg)) 2830 continue; 2831 2832 BuildMI(MBB, MI, DL, TII.get(Opc), Reg) 2833 .setMIFlag(MachineInstr::FrameDestroy); 2834 } 2835 return true; 2836 } 2837 2838 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF, 2839 BitVector &SavedRegs, 2840 RegScavenger *RS) const { 2841 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); 2842 2843 // Spill the BasePtr if it's used. 2844 if (TRI->hasBasePointer(MF)){ 2845 Register BasePtr = TRI->getBaseRegister(); 2846 if (STI.isTarget64BitILP32()) 2847 BasePtr = getX86SubSuperRegister(BasePtr, 64); 2848 SavedRegs.set(BasePtr); 2849 } 2850 } 2851 2852 static bool 2853 HasNestArgument(const MachineFunction *MF) { 2854 const Function &F = MF->getFunction(); 2855 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); 2856 I != E; I++) { 2857 if (I->hasNestAttr() && !I->use_empty()) 2858 return true; 2859 } 2860 return false; 2861 } 2862 2863 /// GetScratchRegister - Get a temp register for performing work in the 2864 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform 2865 /// and the properties of the function either one or two registers will be 2866 /// needed. Set primary to true for the first register, false for the second. 2867 static unsigned 2868 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) { 2869 CallingConv::ID CallingConvention = MF.getFunction().getCallingConv(); 2870 2871 // Erlang stuff. 2872 if (CallingConvention == CallingConv::HiPE) { 2873 if (Is64Bit) 2874 return Primary ? X86::R14 : X86::R13; 2875 else 2876 return Primary ? X86::EBX : X86::EDI; 2877 } 2878 2879 if (Is64Bit) { 2880 if (IsLP64) 2881 return Primary ? X86::R11 : X86::R12; 2882 else 2883 return Primary ? X86::R11D : X86::R12D; 2884 } 2885 2886 bool IsNested = HasNestArgument(&MF); 2887 2888 if (CallingConvention == CallingConv::X86_FastCall || 2889 CallingConvention == CallingConv::Fast || 2890 CallingConvention == CallingConv::Tail) { 2891 if (IsNested) 2892 report_fatal_error("Segmented stacks does not support fastcall with " 2893 "nested function."); 2894 return Primary ? X86::EAX : X86::ECX; 2895 } 2896 if (IsNested) 2897 return Primary ? X86::EDX : X86::EAX; 2898 return Primary ? X86::ECX : X86::EAX; 2899 } 2900 2901 // The stack limit in the TCB is set to this many bytes above the actual stack 2902 // limit. 2903 static const uint64_t kSplitStackAvailable = 256; 2904 2905 void X86FrameLowering::adjustForSegmentedStacks( 2906 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const { 2907 MachineFrameInfo &MFI = MF.getFrameInfo(); 2908 uint64_t StackSize; 2909 unsigned TlsReg, TlsOffset; 2910 DebugLoc DL; 2911 2912 // To support shrink-wrapping we would need to insert the new blocks 2913 // at the right place and update the branches to PrologueMBB. 2914 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet"); 2915 2916 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true); 2917 assert(!MF.getRegInfo().isLiveIn(ScratchReg) && 2918 "Scratch register is live-in"); 2919 2920 if (MF.getFunction().isVarArg()) 2921 report_fatal_error("Segmented stacks do not support vararg functions."); 2922 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() && 2923 !STI.isTargetWin64() && !STI.isTargetFreeBSD() && 2924 !STI.isTargetDragonFly()) 2925 report_fatal_error("Segmented stacks not supported on this platform."); 2926 2927 // Eventually StackSize will be calculated by a link-time pass; which will 2928 // also decide whether checking code needs to be injected into this particular 2929 // prologue. 2930 StackSize = MFI.getStackSize(); 2931 2932 // Do not generate a prologue for leaf functions with a stack of size zero. 2933 // For non-leaf functions we have to allow for the possibility that the 2934 // callis to a non-split function, as in PR37807. This function could also 2935 // take the address of a non-split function. When the linker tries to adjust 2936 // its non-existent prologue, it would fail with an error. Mark the object 2937 // file so that such failures are not errors. See this Go language bug-report 2938 // https://go-review.googlesource.com/c/go/+/148819/ 2939 if (StackSize == 0 && !MFI.hasTailCall()) { 2940 MF.getMMI().setHasNosplitStack(true); 2941 return; 2942 } 2943 2944 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock(); 2945 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock(); 2946 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 2947 bool IsNested = false; 2948 2949 // We need to know if the function has a nest argument only in 64 bit mode. 2950 if (Is64Bit) 2951 IsNested = HasNestArgument(&MF); 2952 2953 // The MOV R10, RAX needs to be in a different block, since the RET we emit in 2954 // allocMBB needs to be last (terminating) instruction. 2955 2956 for (const auto &LI : PrologueMBB.liveins()) { 2957 allocMBB->addLiveIn(LI); 2958 checkMBB->addLiveIn(LI); 2959 } 2960 2961 if (IsNested) 2962 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D); 2963 2964 MF.push_front(allocMBB); 2965 MF.push_front(checkMBB); 2966 2967 // When the frame size is less than 256 we just compare the stack 2968 // boundary directly to the value of the stack pointer, per gcc. 2969 bool CompareStackPointer = StackSize < kSplitStackAvailable; 2970 2971 // Read the limit off the current stacklet off the stack_guard location. 2972 if (Is64Bit) { 2973 if (STI.isTargetLinux()) { 2974 TlsReg = X86::FS; 2975 TlsOffset = IsLP64 ? 0x70 : 0x40; 2976 } else if (STI.isTargetDarwin()) { 2977 TlsReg = X86::GS; 2978 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90. 2979 } else if (STI.isTargetWin64()) { 2980 TlsReg = X86::GS; 2981 TlsOffset = 0x28; // pvArbitrary, reserved for application use 2982 } else if (STI.isTargetFreeBSD()) { 2983 TlsReg = X86::FS; 2984 TlsOffset = 0x18; 2985 } else if (STI.isTargetDragonFly()) { 2986 TlsReg = X86::FS; 2987 TlsOffset = 0x20; // use tls_tcb.tcb_segstack 2988 } else { 2989 report_fatal_error("Segmented stacks not supported on this platform."); 2990 } 2991 2992 if (CompareStackPointer) 2993 ScratchReg = IsLP64 ? X86::RSP : X86::ESP; 2994 else 2995 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP) 2996 .addImm(1).addReg(0).addImm(-StackSize).addReg(0); 2997 2998 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg) 2999 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg); 3000 } else { 3001 if (STI.isTargetLinux()) { 3002 TlsReg = X86::GS; 3003 TlsOffset = 0x30; 3004 } else if (STI.isTargetDarwin()) { 3005 TlsReg = X86::GS; 3006 TlsOffset = 0x48 + 90*4; 3007 } else if (STI.isTargetWin32()) { 3008 TlsReg = X86::FS; 3009 TlsOffset = 0x14; // pvArbitrary, reserved for application use 3010 } else if (STI.isTargetDragonFly()) { 3011 TlsReg = X86::FS; 3012 TlsOffset = 0x10; // use tls_tcb.tcb_segstack 3013 } else if (STI.isTargetFreeBSD()) { 3014 report_fatal_error("Segmented stacks not supported on FreeBSD i386."); 3015 } else { 3016 report_fatal_error("Segmented stacks not supported on this platform."); 3017 } 3018 3019 if (CompareStackPointer) 3020 ScratchReg = X86::ESP; 3021 else 3022 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP) 3023 .addImm(1).addReg(0).addImm(-StackSize).addReg(0); 3024 3025 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() || 3026 STI.isTargetDragonFly()) { 3027 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg) 3028 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg); 3029 } else if (STI.isTargetDarwin()) { 3030 3031 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register. 3032 unsigned ScratchReg2; 3033 bool SaveScratch2; 3034 if (CompareStackPointer) { 3035 // The primary scratch register is available for holding the TLS offset. 3036 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true); 3037 SaveScratch2 = false; 3038 } else { 3039 // Need to use a second register to hold the TLS offset 3040 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false); 3041 3042 // Unfortunately, with fastcc the second scratch register may hold an 3043 // argument. 3044 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2); 3045 } 3046 3047 // If Scratch2 is live-in then it needs to be saved. 3048 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) && 3049 "Scratch register is live-in and not saved"); 3050 3051 if (SaveScratch2) 3052 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r)) 3053 .addReg(ScratchReg2, RegState::Kill); 3054 3055 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2) 3056 .addImm(TlsOffset); 3057 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)) 3058 .addReg(ScratchReg) 3059 .addReg(ScratchReg2).addImm(1).addReg(0) 3060 .addImm(0) 3061 .addReg(TlsReg); 3062 3063 if (SaveScratch2) 3064 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2); 3065 } 3066 } 3067 3068 // This jump is taken if SP >= (Stacklet Limit + Stack Space required). 3069 // It jumps to normal execution of the function body. 3070 BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A); 3071 3072 // On 32 bit we first push the arguments size and then the frame size. On 64 3073 // bit, we pass the stack frame size in r10 and the argument size in r11. 3074 if (Is64Bit) { 3075 // Functions with nested arguments use R10, so it needs to be saved across 3076 // the call to _morestack 3077 3078 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX; 3079 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D; 3080 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D; 3081 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr; 3082 3083 if (IsNested) 3084 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10); 3085 3086 BuildMI(allocMBB, DL, TII.get(getMOVriOpcode(IsLP64, StackSize)), Reg10) 3087 .addImm(StackSize); 3088 BuildMI(allocMBB, DL, 3089 TII.get(getMOVriOpcode(IsLP64, X86FI->getArgumentStackSize())), 3090 Reg11) 3091 .addImm(X86FI->getArgumentStackSize()); 3092 } else { 3093 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32)) 3094 .addImm(X86FI->getArgumentStackSize()); 3095 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32)) 3096 .addImm(StackSize); 3097 } 3098 3099 // __morestack is in libgcc 3100 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) { 3101 // Under the large code model, we cannot assume that __morestack lives 3102 // within 2^31 bytes of the call site, so we cannot use pc-relative 3103 // addressing. We cannot perform the call via a temporary register, 3104 // as the rax register may be used to store the static chain, and all 3105 // other suitable registers may be either callee-save or used for 3106 // parameter passing. We cannot use the stack at this point either 3107 // because __morestack manipulates the stack directly. 3108 // 3109 // To avoid these issues, perform an indirect call via a read-only memory 3110 // location containing the address. 3111 // 3112 // This solution is not perfect, as it assumes that the .rodata section 3113 // is laid out within 2^31 bytes of each function body, but this seems 3114 // to be sufficient for JIT. 3115 // FIXME: Add retpoline support and remove the error here.. 3116 if (STI.useIndirectThunkCalls()) 3117 report_fatal_error("Emitting morestack calls on 64-bit with the large " 3118 "code model and thunks not yet implemented."); 3119 BuildMI(allocMBB, DL, TII.get(X86::CALL64m)) 3120 .addReg(X86::RIP) 3121 .addImm(0) 3122 .addReg(0) 3123 .addExternalSymbol("__morestack_addr") 3124 .addReg(0); 3125 MF.getMMI().setUsesMorestackAddr(true); 3126 } else { 3127 if (Is64Bit) 3128 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32)) 3129 .addExternalSymbol("__morestack"); 3130 else 3131 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32)) 3132 .addExternalSymbol("__morestack"); 3133 } 3134 3135 if (IsNested) 3136 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10)); 3137 else 3138 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET)); 3139 3140 allocMBB->addSuccessor(&PrologueMBB); 3141 3142 checkMBB->addSuccessor(allocMBB, BranchProbability::getZero()); 3143 checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne()); 3144 3145 #ifdef EXPENSIVE_CHECKS 3146 MF.verify(); 3147 #endif 3148 } 3149 3150 /// Lookup an ERTS parameter in the !hipe.literals named metadata node. 3151 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets 3152 /// to fields it needs, through a named metadata node "hipe.literals" containing 3153 /// name-value pairs. 3154 static unsigned getHiPELiteral( 3155 NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) { 3156 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) { 3157 MDNode *Node = HiPELiteralsMD->getOperand(i); 3158 if (Node->getNumOperands() != 2) continue; 3159 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0)); 3160 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1)); 3161 if (!NodeName || !NodeVal) continue; 3162 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue()); 3163 if (ValConst && NodeName->getString() == LiteralName) { 3164 return ValConst->getZExtValue(); 3165 } 3166 } 3167 3168 report_fatal_error("HiPE literal " + LiteralName 3169 + " required but not provided"); 3170 } 3171 3172 // Return true if there are no non-ehpad successors to MBB and there are no 3173 // non-meta instructions between MBBI and MBB.end(). 3174 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, 3175 MachineBasicBlock::const_iterator MBBI) { 3176 return llvm::all_of( 3177 MBB.successors(), 3178 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) && 3179 std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) { 3180 return MI.isMetaInstruction(); 3181 }); 3182 } 3183 3184 /// Erlang programs may need a special prologue to handle the stack size they 3185 /// might need at runtime. That is because Erlang/OTP does not implement a C 3186 /// stack but uses a custom implementation of hybrid stack/heap architecture. 3187 /// (for more information see Eric Stenman's Ph.D. thesis: 3188 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf) 3189 /// 3190 /// CheckStack: 3191 /// temp0 = sp - MaxStack 3192 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart 3193 /// OldStart: 3194 /// ... 3195 /// IncStack: 3196 /// call inc_stack # doubles the stack space 3197 /// temp0 = sp - MaxStack 3198 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart 3199 void X86FrameLowering::adjustForHiPEPrologue( 3200 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const { 3201 MachineFrameInfo &MFI = MF.getFrameInfo(); 3202 DebugLoc DL; 3203 3204 // To support shrink-wrapping we would need to insert the new blocks 3205 // at the right place and update the branches to PrologueMBB. 3206 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet"); 3207 3208 // HiPE-specific values 3209 NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule() 3210 ->getNamedMetadata("hipe.literals"); 3211 if (!HiPELiteralsMD) 3212 report_fatal_error( 3213 "Can't generate HiPE prologue without runtime parameters"); 3214 const unsigned HipeLeafWords 3215 = getHiPELiteral(HiPELiteralsMD, 3216 Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS"); 3217 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5; 3218 const unsigned Guaranteed = HipeLeafWords * SlotSize; 3219 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ? 3220 MF.getFunction().arg_size() - CCRegisteredArgs : 0; 3221 unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize; 3222 3223 assert(STI.isTargetLinux() && 3224 "HiPE prologue is only supported on Linux operating systems."); 3225 3226 // Compute the largest caller's frame that is needed to fit the callees' 3227 // frames. This 'MaxStack' is computed from: 3228 // 3229 // a) the fixed frame size, which is the space needed for all spilled temps, 3230 // b) outgoing on-stack parameter areas, and 3231 // c) the minimum stack space this function needs to make available for the 3232 // functions it calls (a tunable ABI property). 3233 if (MFI.hasCalls()) { 3234 unsigned MoreStackForCalls = 0; 3235 3236 for (auto &MBB : MF) { 3237 for (auto &MI : MBB) { 3238 if (!MI.isCall()) 3239 continue; 3240 3241 // Get callee operand. 3242 const MachineOperand &MO = MI.getOperand(0); 3243 3244 // Only take account of global function calls (no closures etc.). 3245 if (!MO.isGlobal()) 3246 continue; 3247 3248 const Function *F = dyn_cast<Function>(MO.getGlobal()); 3249 if (!F) 3250 continue; 3251 3252 // Do not update 'MaxStack' for primitive and built-in functions 3253 // (encoded with names either starting with "erlang."/"bif_" or not 3254 // having a ".", such as a simple <Module>.<Function>.<Arity>, or an 3255 // "_", such as the BIF "suspend_0") as they are executed on another 3256 // stack. 3257 if (F->getName().contains("erlang.") || F->getName().contains("bif_") || 3258 F->getName().find_first_of("._") == StringRef::npos) 3259 continue; 3260 3261 unsigned CalleeStkArity = 3262 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0; 3263 if (HipeLeafWords - 1 > CalleeStkArity) 3264 MoreStackForCalls = std::max(MoreStackForCalls, 3265 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize); 3266 } 3267 } 3268 MaxStack += MoreStackForCalls; 3269 } 3270 3271 // If the stack frame needed is larger than the guaranteed then runtime checks 3272 // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue. 3273 if (MaxStack > Guaranteed) { 3274 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock(); 3275 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock(); 3276 3277 for (const auto &LI : PrologueMBB.liveins()) { 3278 stackCheckMBB->addLiveIn(LI); 3279 incStackMBB->addLiveIn(LI); 3280 } 3281 3282 MF.push_front(incStackMBB); 3283 MF.push_front(stackCheckMBB); 3284 3285 unsigned ScratchReg, SPReg, PReg, SPLimitOffset; 3286 unsigned LEAop, CMPop, CALLop; 3287 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT"); 3288 if (Is64Bit) { 3289 SPReg = X86::RSP; 3290 PReg = X86::RBP; 3291 LEAop = X86::LEA64r; 3292 CMPop = X86::CMP64rm; 3293 CALLop = X86::CALL64pcrel32; 3294 } else { 3295 SPReg = X86::ESP; 3296 PReg = X86::EBP; 3297 LEAop = X86::LEA32r; 3298 CMPop = X86::CMP32rm; 3299 CALLop = X86::CALLpcrel32; 3300 } 3301 3302 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true); 3303 assert(!MF.getRegInfo().isLiveIn(ScratchReg) && 3304 "HiPE prologue scratch register is live-in"); 3305 3306 // Create new MBB for StackCheck: 3307 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg), 3308 SPReg, false, -MaxStack); 3309 // SPLimitOffset is in a fixed heap location (pointed by BP). 3310 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop)) 3311 .addReg(ScratchReg), PReg, false, SPLimitOffset); 3312 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_AE); 3313 3314 // Create new MBB for IncStack: 3315 BuildMI(incStackMBB, DL, TII.get(CALLop)). 3316 addExternalSymbol("inc_stack_0"); 3317 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg), 3318 SPReg, false, -MaxStack); 3319 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop)) 3320 .addReg(ScratchReg), PReg, false, SPLimitOffset); 3321 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1)).addMBB(incStackMBB).addImm(X86::COND_LE); 3322 3323 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100}); 3324 stackCheckMBB->addSuccessor(incStackMBB, {1, 100}); 3325 incStackMBB->addSuccessor(&PrologueMBB, {99, 100}); 3326 incStackMBB->addSuccessor(incStackMBB, {1, 100}); 3327 } 3328 #ifdef EXPENSIVE_CHECKS 3329 MF.verify(); 3330 #endif 3331 } 3332 3333 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB, 3334 MachineBasicBlock::iterator MBBI, 3335 const DebugLoc &DL, 3336 int Offset) const { 3337 if (Offset <= 0) 3338 return false; 3339 3340 if (Offset % SlotSize) 3341 return false; 3342 3343 int NumPops = Offset / SlotSize; 3344 // This is only worth it if we have at most 2 pops. 3345 if (NumPops != 1 && NumPops != 2) 3346 return false; 3347 3348 // Handle only the trivial case where the adjustment directly follows 3349 // a call. This is the most common one, anyway. 3350 if (MBBI == MBB.begin()) 3351 return false; 3352 MachineBasicBlock::iterator Prev = std::prev(MBBI); 3353 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask()) 3354 return false; 3355 3356 unsigned Regs[2]; 3357 unsigned FoundRegs = 0; 3358 3359 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3360 const MachineOperand &RegMask = Prev->getOperand(1); 3361 3362 auto &RegClass = 3363 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass; 3364 // Try to find up to NumPops free registers. 3365 for (auto Candidate : RegClass) { 3366 // Poor man's liveness: 3367 // Since we're immediately after a call, any register that is clobbered 3368 // by the call and not defined by it can be considered dead. 3369 if (!RegMask.clobbersPhysReg(Candidate)) 3370 continue; 3371 3372 // Don't clobber reserved registers 3373 if (MRI.isReserved(Candidate)) 3374 continue; 3375 3376 bool IsDef = false; 3377 for (const MachineOperand &MO : Prev->implicit_operands()) { 3378 if (MO.isReg() && MO.isDef() && 3379 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) { 3380 IsDef = true; 3381 break; 3382 } 3383 } 3384 3385 if (IsDef) 3386 continue; 3387 3388 Regs[FoundRegs++] = Candidate; 3389 if (FoundRegs == (unsigned)NumPops) 3390 break; 3391 } 3392 3393 if (FoundRegs == 0) 3394 return false; 3395 3396 // If we found only one free register, but need two, reuse the same one twice. 3397 while (FoundRegs < (unsigned)NumPops) 3398 Regs[FoundRegs++] = Regs[0]; 3399 3400 for (int i = 0; i < NumPops; ++i) 3401 BuildMI(MBB, MBBI, DL, 3402 TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]); 3403 3404 return true; 3405 } 3406 3407 MachineBasicBlock::iterator X86FrameLowering:: 3408 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, 3409 MachineBasicBlock::iterator I) const { 3410 bool reserveCallFrame = hasReservedCallFrame(MF); 3411 unsigned Opcode = I->getOpcode(); 3412 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode(); 3413 DebugLoc DL = I->getDebugLoc(); // copy DebugLoc as I will be erased. 3414 uint64_t Amount = TII.getFrameSize(*I); 3415 uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0; 3416 I = MBB.erase(I); 3417 auto InsertPos = skipDebugInstructionsForward(I, MBB.end()); 3418 3419 // Try to avoid emitting dead SP adjustments if the block end is unreachable, 3420 // typically because the function is marked noreturn (abort, throw, 3421 // assert_fail, etc). 3422 if (isDestroy && blockEndIsUnreachable(MBB, I)) 3423 return I; 3424 3425 if (!reserveCallFrame) { 3426 // If the stack pointer can be changed after prologue, turn the 3427 // adjcallstackup instruction into a 'sub ESP, <amt>' and the 3428 // adjcallstackdown instruction into 'add ESP, <amt>' 3429 3430 // We need to keep the stack aligned properly. To do this, we round the 3431 // amount of space needed for the outgoing arguments up to the next 3432 // alignment boundary. 3433 Amount = alignTo(Amount, getStackAlign()); 3434 3435 const Function &F = MF.getFunction(); 3436 bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); 3437 bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves(); 3438 3439 // If we have any exception handlers in this function, and we adjust 3440 // the SP before calls, we may need to indicate this to the unwinder 3441 // using GNU_ARGS_SIZE. Note that this may be necessary even when 3442 // Amount == 0, because the preceding function may have set a non-0 3443 // GNU_ARGS_SIZE. 3444 // TODO: We don't need to reset this between subsequent functions, 3445 // if it didn't change. 3446 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty(); 3447 3448 if (HasDwarfEHHandlers && !isDestroy && 3449 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences()) 3450 BuildCFI(MBB, InsertPos, DL, 3451 MCCFIInstruction::createGnuArgsSize(nullptr, Amount)); 3452 3453 if (Amount == 0) 3454 return I; 3455 3456 // Factor out the amount that gets handled inside the sequence 3457 // (Pushes of argument for frame setup, callee pops for frame destroy) 3458 Amount -= InternalAmt; 3459 3460 // TODO: This is needed only if we require precise CFA. 3461 // If this is a callee-pop calling convention, emit a CFA adjust for 3462 // the amount the callee popped. 3463 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF)) 3464 BuildCFI(MBB, InsertPos, DL, 3465 MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt)); 3466 3467 // Add Amount to SP to destroy a frame, or subtract to setup. 3468 int64_t StackAdjustment = isDestroy ? Amount : -Amount; 3469 3470 if (StackAdjustment) { 3471 // Merge with any previous or following adjustment instruction. Note: the 3472 // instructions merged with here do not have CFI, so their stack 3473 // adjustments do not feed into CfaAdjustment. 3474 StackAdjustment += mergeSPUpdates(MBB, InsertPos, true); 3475 StackAdjustment += mergeSPUpdates(MBB, InsertPos, false); 3476 3477 if (StackAdjustment) { 3478 if (!(F.hasMinSize() && 3479 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment))) 3480 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment, 3481 /*InEpilogue=*/false); 3482 } 3483 } 3484 3485 if (DwarfCFI && !hasFP(MF)) { 3486 // If we don't have FP, but need to generate unwind information, 3487 // we need to set the correct CFA offset after the stack adjustment. 3488 // How much we adjust the CFA offset depends on whether we're emitting 3489 // CFI only for EH purposes or for debugging. EH only requires the CFA 3490 // offset to be correct at each call site, while for debugging we want 3491 // it to be more precise. 3492 3493 int64_t CfaAdjustment = -StackAdjustment; 3494 // TODO: When not using precise CFA, we also need to adjust for the 3495 // InternalAmt here. 3496 if (CfaAdjustment) { 3497 BuildCFI(MBB, InsertPos, DL, 3498 MCCFIInstruction::createAdjustCfaOffset(nullptr, 3499 CfaAdjustment)); 3500 } 3501 } 3502 3503 return I; 3504 } 3505 3506 if (InternalAmt) { 3507 MachineBasicBlock::iterator CI = I; 3508 MachineBasicBlock::iterator B = MBB.begin(); 3509 while (CI != B && !std::prev(CI)->isCall()) 3510 --CI; 3511 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false); 3512 } 3513 3514 return I; 3515 } 3516 3517 bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const { 3518 assert(MBB.getParent() && "Block is not attached to a function!"); 3519 const MachineFunction &MF = *MBB.getParent(); 3520 if (!MBB.isLiveIn(X86::EFLAGS)) 3521 return true; 3522 3523 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 3524 return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext(); 3525 } 3526 3527 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const { 3528 assert(MBB.getParent() && "Block is not attached to a function!"); 3529 3530 // Win64 has strict requirements in terms of epilogue and we are 3531 // not taking a chance at messing with them. 3532 // I.e., unless this block is already an exit block, we can't use 3533 // it as an epilogue. 3534 if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock()) 3535 return false; 3536 3537 // Swift async context epilogue has a BTR instruction that clobbers parts of 3538 // EFLAGS. 3539 const MachineFunction &MF = *MBB.getParent(); 3540 if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext()) 3541 return !flagsNeedToBePreservedBeforeTheTerminators(MBB); 3542 3543 if (canUseLEAForSPInEpilogue(*MBB.getParent())) 3544 return true; 3545 3546 // If we cannot use LEA to adjust SP, we may need to use ADD, which 3547 // clobbers the EFLAGS. Check that we do not need to preserve it, 3548 // otherwise, conservatively assume this is not 3549 // safe to insert the epilogue here. 3550 return !flagsNeedToBePreservedBeforeTheTerminators(MBB); 3551 } 3552 3553 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const { 3554 // If we may need to emit frameless compact unwind information, give 3555 // up as this is currently broken: PR25614. 3556 bool CompactUnwind = 3557 MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() != 3558 nullptr; 3559 return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) || 3560 !CompactUnwind) && 3561 // The lowering of segmented stack and HiPE only support entry 3562 // blocks as prologue blocks: PR26107. This limitation may be 3563 // lifted if we fix: 3564 // - adjustForSegmentedStacks 3565 // - adjustForHiPEPrologue 3566 MF.getFunction().getCallingConv() != CallingConv::HiPE && 3567 !MF.shouldSplitStack(); 3568 } 3569 3570 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers( 3571 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 3572 const DebugLoc &DL, bool RestoreSP) const { 3573 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env"); 3574 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32"); 3575 assert(STI.is32Bit() && !Uses64BitFramePtr && 3576 "restoring EBP/ESI on non-32-bit target"); 3577 3578 MachineFunction &MF = *MBB.getParent(); 3579 Register FramePtr = TRI->getFrameRegister(MF); 3580 Register BasePtr = TRI->getBaseRegister(); 3581 WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo(); 3582 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); 3583 MachineFrameInfo &MFI = MF.getFrameInfo(); 3584 3585 // FIXME: Don't set FrameSetup flag in catchret case. 3586 3587 int FI = FuncInfo.EHRegNodeFrameIndex; 3588 int EHRegSize = MFI.getObjectSize(FI); 3589 3590 if (RestoreSP) { 3591 // MOV32rm -EHRegSize(%ebp), %esp 3592 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP), 3593 X86::EBP, true, -EHRegSize) 3594 .setMIFlag(MachineInstr::FrameSetup); 3595 } 3596 3597 Register UsedReg; 3598 int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed(); 3599 int EndOffset = -EHRegOffset - EHRegSize; 3600 FuncInfo.EHRegNodeEndOffset = EndOffset; 3601 3602 if (UsedReg == FramePtr) { 3603 // ADD $offset, %ebp 3604 unsigned ADDri = getADDriOpcode(false, EndOffset); 3605 BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr) 3606 .addReg(FramePtr) 3607 .addImm(EndOffset) 3608 .setMIFlag(MachineInstr::FrameSetup) 3609 ->getOperand(3) 3610 .setIsDead(); 3611 assert(EndOffset >= 0 && 3612 "end of registration object above normal EBP position!"); 3613 } else if (UsedReg == BasePtr) { 3614 // LEA offset(%ebp), %esi 3615 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr), 3616 FramePtr, false, EndOffset) 3617 .setMIFlag(MachineInstr::FrameSetup); 3618 // MOV32rm SavedEBPOffset(%esi), %ebp 3619 assert(X86FI->getHasSEHFramePtrSave()); 3620 int Offset = 3621 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg) 3622 .getFixed(); 3623 assert(UsedReg == BasePtr); 3624 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr), 3625 UsedReg, true, Offset) 3626 .setMIFlag(MachineInstr::FrameSetup); 3627 } else { 3628 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr"); 3629 } 3630 return MBBI; 3631 } 3632 3633 int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const { 3634 return TRI->getSlotSize(); 3635 } 3636 3637 Register 3638 X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const { 3639 return TRI->getDwarfRegNum(StackPtr, true); 3640 } 3641 3642 namespace { 3643 // Struct used by orderFrameObjects to help sort the stack objects. 3644 struct X86FrameSortingObject { 3645 bool IsValid = false; // true if we care about this Object. 3646 unsigned ObjectIndex = 0; // Index of Object into MFI list. 3647 unsigned ObjectSize = 0; // Size of Object in bytes. 3648 Align ObjectAlignment = Align(1); // Alignment of Object in bytes. 3649 unsigned ObjectNumUses = 0; // Object static number of uses. 3650 }; 3651 3652 // The comparison function we use for std::sort to order our local 3653 // stack symbols. The current algorithm is to use an estimated 3654 // "density". This takes into consideration the size and number of 3655 // uses each object has in order to roughly minimize code size. 3656 // So, for example, an object of size 16B that is referenced 5 times 3657 // will get higher priority than 4 4B objects referenced 1 time each. 3658 // It's not perfect and we may be able to squeeze a few more bytes out of 3659 // it (for example : 0(esp) requires fewer bytes, symbols allocated at the 3660 // fringe end can have special consideration, given their size is less 3661 // important, etc.), but the algorithmic complexity grows too much to be 3662 // worth the extra gains we get. This gets us pretty close. 3663 // The final order leaves us with objects with highest priority going 3664 // at the end of our list. 3665 struct X86FrameSortingComparator { 3666 inline bool operator()(const X86FrameSortingObject &A, 3667 const X86FrameSortingObject &B) const { 3668 uint64_t DensityAScaled, DensityBScaled; 3669 3670 // For consistency in our comparison, all invalid objects are placed 3671 // at the end. This also allows us to stop walking when we hit the 3672 // first invalid item after it's all sorted. 3673 if (!A.IsValid) 3674 return false; 3675 if (!B.IsValid) 3676 return true; 3677 3678 // The density is calculated by doing : 3679 // (double)DensityA = A.ObjectNumUses / A.ObjectSize 3680 // (double)DensityB = B.ObjectNumUses / B.ObjectSize 3681 // Since this approach may cause inconsistencies in 3682 // the floating point <, >, == comparisons, depending on the floating 3683 // point model with which the compiler was built, we're going 3684 // to scale both sides by multiplying with 3685 // A.ObjectSize * B.ObjectSize. This ends up factoring away 3686 // the division and, with it, the need for any floating point 3687 // arithmetic. 3688 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) * 3689 static_cast<uint64_t>(B.ObjectSize); 3690 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) * 3691 static_cast<uint64_t>(A.ObjectSize); 3692 3693 // If the two densities are equal, prioritize highest alignment 3694 // objects. This allows for similar alignment objects 3695 // to be packed together (given the same density). 3696 // There's room for improvement here, also, since we can pack 3697 // similar alignment (different density) objects next to each 3698 // other to save padding. This will also require further 3699 // complexity/iterations, and the overall gain isn't worth it, 3700 // in general. Something to keep in mind, though. 3701 if (DensityAScaled == DensityBScaled) 3702 return A.ObjectAlignment < B.ObjectAlignment; 3703 3704 return DensityAScaled < DensityBScaled; 3705 } 3706 }; 3707 } // namespace 3708 3709 // Order the symbols in the local stack. 3710 // We want to place the local stack objects in some sort of sensible order. 3711 // The heuristic we use is to try and pack them according to static number 3712 // of uses and size of object in order to minimize code size. 3713 void X86FrameLowering::orderFrameObjects( 3714 const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const { 3715 const MachineFrameInfo &MFI = MF.getFrameInfo(); 3716 3717 // Don't waste time if there's nothing to do. 3718 if (ObjectsToAllocate.empty()) 3719 return; 3720 3721 // Create an array of all MFI objects. We won't need all of these 3722 // objects, but we're going to create a full array of them to make 3723 // it easier to index into when we're counting "uses" down below. 3724 // We want to be able to easily/cheaply access an object by simply 3725 // indexing into it, instead of having to search for it every time. 3726 std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd()); 3727 3728 // Walk the objects we care about and mark them as such in our working 3729 // struct. 3730 for (auto &Obj : ObjectsToAllocate) { 3731 SortingObjects[Obj].IsValid = true; 3732 SortingObjects[Obj].ObjectIndex = Obj; 3733 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj); 3734 // Set the size. 3735 int ObjectSize = MFI.getObjectSize(Obj); 3736 if (ObjectSize == 0) 3737 // Variable size. Just use 4. 3738 SortingObjects[Obj].ObjectSize = 4; 3739 else 3740 SortingObjects[Obj].ObjectSize = ObjectSize; 3741 } 3742 3743 // Count the number of uses for each object. 3744 for (auto &MBB : MF) { 3745 for (auto &MI : MBB) { 3746 if (MI.isDebugInstr()) 3747 continue; 3748 for (const MachineOperand &MO : MI.operands()) { 3749 // Check to see if it's a local stack symbol. 3750 if (!MO.isFI()) 3751 continue; 3752 int Index = MO.getIndex(); 3753 // Check to see if it falls within our range, and is tagged 3754 // to require ordering. 3755 if (Index >= 0 && Index < MFI.getObjectIndexEnd() && 3756 SortingObjects[Index].IsValid) 3757 SortingObjects[Index].ObjectNumUses++; 3758 } 3759 } 3760 } 3761 3762 // Sort the objects using X86FrameSortingAlgorithm (see its comment for 3763 // info). 3764 llvm::stable_sort(SortingObjects, X86FrameSortingComparator()); 3765 3766 // Now modify the original list to represent the final order that 3767 // we want. The order will depend on whether we're going to access them 3768 // from the stack pointer or the frame pointer. For SP, the list should 3769 // end up with the END containing objects that we want with smaller offsets. 3770 // For FP, it should be flipped. 3771 int i = 0; 3772 for (auto &Obj : SortingObjects) { 3773 // All invalid items are sorted at the end, so it's safe to stop. 3774 if (!Obj.IsValid) 3775 break; 3776 ObjectsToAllocate[i++] = Obj.ObjectIndex; 3777 } 3778 3779 // Flip it if we're accessing off of the FP. 3780 if (!TRI->hasStackRealignment(MF) && hasFP(MF)) 3781 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end()); 3782 } 3783 3784 3785 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const { 3786 // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue. 3787 unsigned Offset = 16; 3788 // RBP is immediately pushed. 3789 Offset += SlotSize; 3790 // All callee-saved registers are then pushed. 3791 Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize(); 3792 // Every funclet allocates enough stack space for the largest outgoing call. 3793 Offset += getWinEHFuncletFrameSize(MF); 3794 return Offset; 3795 } 3796 3797 void X86FrameLowering::processFunctionBeforeFrameFinalized( 3798 MachineFunction &MF, RegScavenger *RS) const { 3799 // Mark the function as not having WinCFI. We will set it back to true in 3800 // emitPrologue if it gets called and emits CFI. 3801 MF.setHasWinCFI(false); 3802 3803 // If we are using Windows x64 CFI, ensure that the stack is always 8 byte 3804 // aligned. The format doesn't support misaligned stack adjustments. 3805 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) 3806 MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize)); 3807 3808 // If this function isn't doing Win64-style C++ EH, we don't need to do 3809 // anything. 3810 if (STI.is64Bit() && MF.hasEHFunclets() && 3811 classifyEHPersonality(MF.getFunction().getPersonalityFn()) == 3812 EHPersonality::MSVC_CXX) { 3813 adjustFrameForMsvcCxxEh(MF); 3814 } 3815 } 3816 3817 void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const { 3818 // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset 3819 // relative to RSP after the prologue. Find the offset of the last fixed 3820 // object, so that we can allocate a slot immediately following it. If there 3821 // were no fixed objects, use offset -SlotSize, which is immediately after the 3822 // return address. Fixed objects have negative frame indices. 3823 MachineFrameInfo &MFI = MF.getFrameInfo(); 3824 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo(); 3825 int64_t MinFixedObjOffset = -SlotSize; 3826 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) 3827 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I)); 3828 3829 for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) { 3830 for (WinEHHandlerType &H : TBME.HandlerArray) { 3831 int FrameIndex = H.CatchObj.FrameIndex; 3832 if (FrameIndex != INT_MAX) { 3833 // Ensure alignment. 3834 unsigned Align = MFI.getObjectAlign(FrameIndex).value(); 3835 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align; 3836 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex); 3837 MFI.setObjectOffset(FrameIndex, MinFixedObjOffset); 3838 } 3839 } 3840 } 3841 3842 // Ensure alignment. 3843 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8; 3844 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize; 3845 int UnwindHelpFI = 3846 MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false); 3847 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI; 3848 3849 // Store -2 into UnwindHelp on function entry. We have to scan forwards past 3850 // other frame setup instructions. 3851 MachineBasicBlock &MBB = MF.front(); 3852 auto MBBI = MBB.begin(); 3853 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) 3854 ++MBBI; 3855 3856 DebugLoc DL = MBB.findDebugLoc(MBBI); 3857 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)), 3858 UnwindHelpFI) 3859 .addImm(-2); 3860 } 3861 3862 void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced( 3863 MachineFunction &MF, RegScavenger *RS) const { 3864 if (STI.is32Bit() && MF.hasEHFunclets()) 3865 restoreWinEHStackPointersInParent(MF); 3866 } 3867 3868 void X86FrameLowering::restoreWinEHStackPointersInParent( 3869 MachineFunction &MF) const { 3870 // 32-bit functions have to restore stack pointers when control is transferred 3871 // back to the parent function. These blocks are identified as eh pads that 3872 // are not funclet entries. 3873 bool IsSEH = isAsynchronousEHPersonality( 3874 classifyEHPersonality(MF.getFunction().getPersonalityFn())); 3875 for (MachineBasicBlock &MBB : MF) { 3876 bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry(); 3877 if (NeedsRestore) 3878 restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(), 3879 /*RestoreSP=*/IsSEH); 3880 } 3881 } 3882