//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Value *V) {
  // If the value is used by compare instructions, and signed predicates
  // outnumber unsigned ones among those users, prefer SIGN_EXTEND.
  //
  // This helps eliminate redundant sign and zero extension instructions, and
  // exposes more machine CSE opportunities.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const User *U : V->users()) {
    if (const auto *CI = dyn_cast<CmpInst>(U)) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  MachineModuleInfo &MMI = MF->getMMI();

  // Check whether the function can return without sret-demotion.
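  // If CanLowerReturn ends up false, isel demotes the return value: it is
  // passed back through an implicit sret pointer argument instead of being
  // returned in registers.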
  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI);
  CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
                                       Fn->isVarArg(), Outs, Fn->getContext());

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::const_iterator BB = Fn->begin(), EB = Fn->end();
  for (; BB != EB; ++BB)
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        // Static allocas can be folded into the initial stack frame
        // adjustment.
        if (AI->isStaticAlloca()) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          Type *Ty = AI->getAllocatedType();
          uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
          unsigned Align =
              std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
                       AI->getAlignment());

          TySize *= CUI->getZExtValue();   // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.

          StaticAllocaMap[AI] =
              MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);

        } else {
          unsigned Align = std::max(
              (unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
                  AI->getAllocatedType()),
              AI->getAlignment());
          unsigned StackAlign =
              MF->getSubtarget().getFrameLowering()->getStackAlignment();
          if (Align <= StackAlign)
            Align = 0;
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
        }
      }

      // Look for inline asm that clobbers the SP register.
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(I);
        if (isa<InlineAsm>(CS.getCalledValue())) {
          unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(TRI, CS);
          for (size_t I = 0, E = Ops.size(); I != E; ++I) {
            TargetLowering::AsmOperandInfo &Op = Ops[I];
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(true);
            }
          }
        }
      }

      // Look for calls to the @llvm.va_start intrinsic. We can omit some
      // prologue boilerplate for variadic functions that don't examine their
      // arguments.
      if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::vastart)
          MF->getFrameInfo()->setHasVAStart(true);
      }

      // If we have a musttail call in a variadic function, we need to ensure
      // we forward implicit register parameters.
      if (const auto *CI = dyn_cast<CallInst>(I)) {
        if (CI->isMustTailCall() && Fn->isVarArg())
          MF->getFrameInfo()->setHasMustTailInVarArgFunc(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
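      // Instruction selection runs one basic block at a time, so a value
      // that crosses a block boundary must live in a virtual register that
      // CopyToReg/CopyFromReg nodes can reference from both blocks.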
      if (isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

      // Collect llvm.dbg.declare information. This is done now instead of
      // during the initial isel pass through the IR so that it is done
      // in a predictable order.
      if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
        assert(DI->getVariable() && "Missing variable");
        assert(DI->getDebugLoc() && "Missing location");
        if (MMI.hasDebugInfo()) {
          // Don't handle byval struct arguments or VLAs, for example.
          // Non-byval arguments are handled here (they refer to the stack
          // temporary alloca at this point).
          const Value *Address = DI->getAddress();
          if (Address) {
            if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
              Address = BCI->getOperand(0);
            if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
              DenseMap<const AllocaInst *, int>::iterator SI =
                  StaticAllocaMap.find(AI);
              if (SI != StaticAllocaMap.end()) { // Check for VLAs.
                int FI = SI->second;
                MMI.setVariableDbgInfo(DI->getVariable(), DI->getExpression(),
                                       FI, DI->getDebugLoc());
              }
            }
          }
        }
      }

      // Decide the preferred extend type for a value.
      PreferredExtendType[I] = getPreferredExtendForValue(I);
    }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB->hasAddressTaken())
      MBB->setHasAddressTaken();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (BasicBlock::const_iterator I = BB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (PN->use_empty()) continue;

      // Skip empty types.
      if (PN->getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN->getDebugLoc();
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  // Mark landing pad blocks.
  SmallVector<const LandingPadInst *, 4> LPads;
  for (BB = Fn->begin(); BB != EB; ++BB) {
    if (const auto *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
      MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
    if (BB->isLandingPad())
      LPads.push_back(BB->getLandingPadInst());
  }

  // If this is an MSVC EH personality, we need to do a bit more work.
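  // For the SEH personalities we register the catch and cleanup handlers
  // parsed from llvm.eh.actions; for C++ EH we compute EH state numbers over
  // the parent function (see below).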
  EHPersonality Personality = EHPersonality::Unknown;
  if (Fn->hasPersonalityFn())
    Personality = classifyEHPersonality(Fn->getPersonalityFn());
  if (!isMSVCEHPersonality(Personality))
    return;

  if (Personality == EHPersonality::MSVC_Win64SEH ||
      Personality == EHPersonality::MSVC_X86SEH) {
    addSEHHandlersForLPads(LPads);
  }

  WinEHFuncInfo &EHInfo = MMI.getWinEHFuncInfo(&fn);
  if (Personality == EHPersonality::MSVC_CXX) {
    const Function *WinEHParentFn = MMI.getWinEHParent(&fn);
    calculateWinCXXEHStateNumbers(WinEHParentFn, EHInfo);
  }

  // Copy the state numbers to LandingPadInfo for the current function, which
  // could be a handler or the parent. This should happen for 32-bit SEH and
  // C++ EH.
  if (Personality == EHPersonality::MSVC_CXX ||
      Personality == EHPersonality::MSVC_X86SEH) {
    for (const LandingPadInst *LP : LPads) {
      MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
      MMI.addWinEHState(LPadMBB, EHInfo.LandingPadStateMap[LP]);
    }
  }
}

void FunctionLoweringInfo::addSEHHandlersForLPads(
    ArrayRef<const LandingPadInst *> LPads) {
  MachineModuleInfo &MMI = MF->getMMI();

  // Iterate over all landing pads with llvm.eh.actions calls.
  for (const LandingPadInst *LP : LPads) {
    const IntrinsicInst *ActionsCall =
        dyn_cast<IntrinsicInst>(LP->getNextNode());
    if (!ActionsCall ||
        ActionsCall->getIntrinsicID() != Intrinsic::eh_actions)
      continue;

    // Parse the llvm.eh.actions call we found.
    MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
    SmallVector<std::unique_ptr<ActionHandler>, 4> Actions;
    parseEHActions(ActionsCall, Actions);

    // Iterate over the EH actions from highest to lowest precedence, which
    // means iterating in reverse.
    for (auto I = Actions.rbegin(), E = Actions.rend(); I != E; ++I) {
      ActionHandler *Action = I->get();
      if (auto *CH = dyn_cast<CatchHandler>(Action)) {
        const auto *Filter =
            dyn_cast<Function>(CH->getSelector()->stripPointerCasts());
        assert((Filter || CH->getSelector()->isNullValue()) &&
               "expected function or catch-all");
        const auto *RecoverBA =
            cast<BlockAddress>(CH->getHandlerBlockOrFunc());
        MMI.addSEHCatchHandler(LPadMBB, Filter, RecoverBA);
      } else {
        assert(isa<CleanupHandler>(Action));
        const auto *Fini = cast<Function>(Action->getHandlerBlockOrFunc());
        MMI.addSEHCleanupHandler(LPadMBB, Fini);
      }
    }
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  assert(CatchInfoFound.size() == CatchInfoLost.size() &&
         "Not all catch info was assigned to a landing pad!");

  MBBMap.clear();
  ValueMap.clear();
  StaticAllocaMap.clear();
#ifndef NDEBUG
  CatchInfoLost.clear();
  CatchInfoFound.clear();
#endif
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  StatepointStackSlots.clear();
  StatepointRelocatedValues.clear();
  PreferredExtendType.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
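/// The register class is the one the target associates with VT via
/// TargetLowering::getRegClassFor.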
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
  return RegInfo->createVirtualRegister(
      MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
  const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, Ty, ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = CreateReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by zero extension. The bit width must be no smaller
/// than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->KnownZero.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
    LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
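///
/// The result is the intersection of what is known about every incoming
/// value: NumSignBits is the minimum across operands, and the KnownZero and
/// KnownOne masks are ANDed together. For example, a PHI of the constants
/// 0 and 2 yields KnownOne = 0 and KnownZero = ~2, i.e. every bit except
/// bit 1 is known to be zero.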
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  unsigned DestReg = ValueMap[PN];
  if (!TargetRegisterInfo::isVirtualRegister(DestReg))
    return;
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    APInt Zero(BitWidth, 0);
    DestLOI.KnownZero = Zero;
    DestLOI.KnownOne = Zero;
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.KnownZero = ~Val;
    DestLOI.KnownOne = Val;
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
         DestLOI.KnownOne.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      APInt Zero(BitWidth, 0);
      DestLOI.KnownZero = Zero;
      DestLOI.KnownOne = Zero;
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.KnownZero &= ~Val;
      DestLOI.KnownOne &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.KnownZero &= SrcLOI->KnownZero;
    DestLOI.KnownOne &= SrcLOI->KnownOne;
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides the previous frame index entry for this
/// argument, if any.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then 0 is
/// returned.
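/// The returned frame index can be used to build frame-index machine
/// operands addressing the argument's stack slot.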
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  DenseMap<const Argument *, int>::iterator I =
      ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return 0;
}

/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
/// being passed to this variadic function, and set the MachineModuleInfo's
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
/// reference to _fltused on Windows, which will link in MSVCRT's
/// floating-point support.
void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
                                      MachineModuleInfo *MMI) {
  FunctionType *FT = cast<FunctionType>(
      I.getCalledValue()->getType()->getContainedType(0));
  if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
    for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
      Type *T = I.getArgOperand(i)->getType();
      for (auto *SubTy : post_order(T)) {
        if (SubTy->isFloatingPointTy()) {
          MMI->setUsesVAFloatArgument(true);
          return;
        }
      }
    }
  }
}

/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add it to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
                             MachineBasicBlock *MBB) {
  MMI.addPersonality(
      MBB,
      cast<Function>(
          I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()));

  if (I.isCleanup())
    MMI.addCleanup(MBB);

  // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
  // but we need to do it this way because of how the DWARF EH emitter
  // processes the clauses.
  for (unsigned i = I.getNumClauses(); i != 0; --i) {
    Value *Val = I.getClause(i - 1);
    if (I.isCatch(i - 1)) {
      MMI.addCatchTypeInfo(MBB,
                           dyn_cast<GlobalValue>(Val->stripPointerCasts()));
    } else {
      // Add filters in a list.
      Constant *CVal = cast<Constant>(Val);
      SmallVector<const GlobalValue *, 4> FilterList;
      for (User::op_iterator
             II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
        FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));

      MMI.addFilterTypeInfo(MBB, FilterList);
    }
  }
}