//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAGISel.h"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
"llvm/Target/TargetOptions.h" 91 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 92 #include <algorithm> 93 #include <cassert> 94 #include <cstdint> 95 #include <iterator> 96 #include <limits> 97 #include <memory> 98 #include <string> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "isel" 105 106 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on"); 107 STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected"); 108 STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel"); 109 STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG"); 110 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path"); 111 STATISTIC(NumEntryBlocks, "Number of entry blocks encountered"); 112 STATISTIC(NumFastIselFailLowerArguments, 113 "Number of entry blocks where fast isel failed to lower arguments"); 114 115 static cl::opt<int> EnableFastISelAbort( 116 "fast-isel-abort", cl::Hidden, 117 cl::desc("Enable abort calls when \"fast\" instruction selection " 118 "fails to lower an instruction: 0 disable the abort, 1 will " 119 "abort but for args, calls and terminators, 2 will also " 120 "abort for argument lowering, and 3 will never fallback " 121 "to SelectionDAG.")); 122 123 static cl::opt<bool> EnableFastISelFallbackReport( 124 "fast-isel-report-on-fallback", cl::Hidden, 125 cl::desc("Emit a diagnostic when \"fast\" instruction selection " 126 "falls back to SelectionDAG.")); 127 128 static cl::opt<bool> 129 UseMBPI("use-mbpi", 130 cl::desc("use Machine Branch Probability Info"), 131 cl::init(true), cl::Hidden); 132 133 #ifndef NDEBUG 134 static cl::opt<std::string> 135 FilterDAGBasicBlockName("filter-view-dags", cl::Hidden, 136 cl::desc("Only display the basic block whose name " 137 "matches this for all view-*-dags options")); 138 static cl::opt<bool> 139 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden, 140 cl::desc("Pop up a window to show dags before the first " 141 "dag combine pass")); 142 static cl::opt<bool> 143 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden, 144 cl::desc("Pop up a window to show dags before legalize types")); 145 static cl::opt<bool> 146 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden, 147 cl::desc("Pop up a window to show dags before legalize")); 148 static cl::opt<bool> 149 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden, 150 cl::desc("Pop up a window to show dags before the second " 151 "dag combine pass")); 152 static cl::opt<bool> 153 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden, 154 cl::desc("Pop up a window to show dags before the post legalize types" 155 " dag combine pass")); 156 static cl::opt<bool> 157 ViewISelDAGs("view-isel-dags", cl::Hidden, 158 cl::desc("Pop up a window to show isel dags as they are selected")); 159 static cl::opt<bool> 160 ViewSchedDAGs("view-sched-dags", cl::Hidden, 161 cl::desc("Pop up a window to show sched dags as they are processed")); 162 static cl::opt<bool> 163 ViewSUnitDAGs("view-sunit-dags", cl::Hidden, 164 cl::desc("Pop up a window to show SUnit dags after they are processed")); 165 #else 166 static const bool ViewDAGCombine1 = false, 167 ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false, 168 ViewDAGCombine2 = false, 169 ViewDAGCombineLT = false, 170 ViewISelDAGs = false, ViewSchedDAGs = false, 171 ViewSUnitDAGs = false; 172 #endif 173 174 //===---------------------------------------------------------------------===// 175 /// 176 /// RegisterScheduler class 
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;

//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
static cl::opt<RegisterScheduler::FunctionPassCtor, false,
               RegisterPassParser<RegisterScheduler>>
ISHeuristic("pre-RA-sched",
            cl::init(&createDefaultScheduler), cl::Hidden,
            cl::desc("Instruction schedulers available (before register"
                     " allocation):"));

static RegisterScheduler
defaultListDAGScheduler("default", "Best scheduler for the target",
                        createDefaultScheduler);

namespace llvm {

//===--------------------------------------------------------------------===//
/// This class is used by SelectionDAGISel to temporarily override
/// the optimization level on a per-function basis.
class OptLevelChanger {
  SelectionDAGISel &IS;
  CodeGenOpt::Level SavedOptLevel;
  bool SavedFastISel;

public:
  OptLevelChanger(SelectionDAGISel &ISel,
                  CodeGenOpt::Level NewOptLevel) : IS(ISel) {
    SavedOptLevel = IS.OptLevel;
    if (NewOptLevel == SavedOptLevel)
      return;
    IS.OptLevel = NewOptLevel;
    IS.TM.setOptLevel(NewOptLevel);
    DEBUG(dbgs() << "\nChanging optimization level for Function "
                 << IS.MF->getFunction().getName() << "\n");
    DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
                 << " ; After: -O" << NewOptLevel << "\n");
    SavedFastISel = IS.TM.Options.EnableFastISel;
    if (NewOptLevel == CodeGenOpt::None) {
      IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
      DEBUG(dbgs() << "\tFastISel is "
                   << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
                   << "\n");
    }
  }

  ~OptLevelChanger() {
    if (IS.OptLevel == SavedOptLevel)
      return;
    DEBUG(dbgs() << "\nRestoring optimization level for Function "
                 << IS.MF->getFunction().getName() << "\n");
    DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
                 << " ; After: -O" << SavedOptLevel << "\n");
    IS.OptLevel = SavedOptLevel;
    IS.TM.setOptLevel(SavedOptLevel);
    IS.TM.setFastISel(SavedFastISel);
  }
};

//===--------------------------------------------------------------------===//
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
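/// The subtarget may supply its own scheduler constructor; otherwise the
/// choice follows TLI->getSchedulingPreference() (Source, RegPressure,
/// Hybrid, VLIW, or ILP), as the body below shows.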
ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
                                           CodeGenOpt::Level OptLevel) {
  const TargetLowering *TLI = IS->TLI;
  const TargetSubtargetInfo &ST = IS->MF->getSubtarget();

  // Try first to see if the Target has its own way of selecting a scheduler.
  if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
    return SchedulerCtor(IS, OptLevel);
  }

  if (OptLevel == CodeGenOpt::None ||
      (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
      TLI->getSchedulingPreference() == Sched::Source)
    return createSourceListDAGScheduler(IS, OptLevel);
  if (TLI->getSchedulingPreference() == Sched::RegPressure)
    return createBURRListDAGScheduler(IS, OptLevel);
  if (TLI->getSchedulingPreference() == Sched::Hybrid)
    return createHybridListDAGScheduler(IS, OptLevel);
  if (TLI->getSchedulingPreference() == Sched::VLIW)
    return createVLIWDAGScheduler(IS, OptLevel);
  assert(TLI->getSchedulingPreference() == Sched::ILP &&
         "Unknown sched type!");
  return createILPListDAGScheduler(IS, OptLevel);
}

} // end namespace llvm

// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
// instructions are special in various ways, which require special support to
// insert. The specified MachineInstr is created but not inserted into any
// basic blocks, and this method is called to expand it into a sequence of
// instructions, potentially also creating new basic blocks and control flow.
// When new basic blocks are inserted and the edges from MBB to its successors
// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
// DenseMap.
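//
// A minimal sketch of such an override (hypothetical MyTarget names; real
// targets dispatch on their own pseudo-instruction opcodes):
//
//   MachineBasicBlock *
//   MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
//                                                 MachineBasicBlock *MBB) const {
//     switch (MI.getOpcode()) {
//     case MyTarget::SELECT_PSEUDO:
//       return emitSelectPseudo(MI, MBB); // may split MBB / add control flow
//     default:
//       llvm_unreachable("unexpected custom-inserter opcode");
//     }
//   }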
MachineBasicBlock *
TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const {
#ifndef NDEBUG
  dbgs() << "If a target marks an instruction with "
            "'usesCustomInserter', it must implement "
            "TargetLowering::EmitInstrWithCustomInserter!";
#endif
  llvm_unreachable(nullptr);
}

void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                   SDNode *Node) const {
  assert(!MI.hasPostISelHook() &&
         "If a target marks an instruction with 'hasPostISelHook', "
         "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
}

//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//

SelectionDAGISel::SelectionDAGISel(TargetMachine &tm, CodeGenOpt::Level OL)
    : MachineFunctionPass(ID), TM(tm), FuncInfo(new FunctionLoweringInfo()),
      CurDAG(new SelectionDAG(tm, OL)),
      SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)), AA(), GFI(),
      OptLevel(OL), DAGSize(0) {
  initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
  initializeBranchProbabilityInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
  initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}

SelectionDAGISel::~SelectionDAGISel() {
  delete SDB;
  delete CurDAG;
  delete FuncInfo;
}

void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
  if (OptLevel != CodeGenOpt::None)
    AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<GCModuleInfo>();
  AU.addRequired<StackProtector>();
  AU.addPreserved<StackProtector>();
  AU.addPreserved<GCModuleInfo>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  if (UseMBPI && OptLevel != CodeGenOpt::None)
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// SplitCriticalSideEffectEdges - Look for critical edges with a PHI input
/// value that may trap. In this case we have to split the edge so that the
/// path through the predecessor block that doesn't go to the phi block doesn't
/// execute the possibly trapping instruction. If available, we pass domtree
/// and loop info to be updated when we split critical edges. This is because
/// SelectionDAGISel preserves these analyses.
/// This is required for correctness, so it must be done at -O0.
///
static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT,
                                         LoopInfo *LI) {
  // Look for blocks with phi nodes.
  for (BasicBlock &BB : Fn) {
    PHINode *PN = dyn_cast<PHINode>(BB.begin());
    if (!PN) continue;

  ReprocessBlock:
    // For each block with a PHI node, check to see if any of the input values
    // are potentially trapping constant expressions. Constant expressions are
    // the only potentially trapping value that can occur as the argument to a
    // PHI.
    for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
        if (!CE || !CE->canTrap()) continue;

        // The only case we have to worry about is when the edge is critical.
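        // (An edge is critical when its source block has multiple successors
        // and its destination block has multiple predecessors.)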
        // Since this block has a PHI Node, we assume it has multiple input
        // edges: check to see if the pred has multiple successors.
        BasicBlock *Pred = PN->getIncomingBlock(i);
        if (Pred->getTerminator()->getNumSuccessors() == 1)
          continue;

        // Okay, we have to split this edge.
        SplitCriticalEdge(
            Pred->getTerminator(), GetSuccessorNumber(Pred, &BB),
            CriticalEdgeSplittingOptions(DT, LI).setMergeIdenticalEdges());
        goto ReprocessBlock;
      }
  }
}

bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
  // If we already selected that function, we do not need to run SDISel.
  if (mf.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;
  // Do some sanity-checking on the command-line options.
  assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
         "-fast-isel-abort > 0 requires -fast-isel");

  const Function &Fn = mf.getFunction();
  MF = &mf;

  // Reset the target options before resetting the optimization
  // level below.
  // FIXME: This is a horrible hack and should be processed via
  // codegen looking at the optimization level explicitly when
  // it wants to look at it.
  TM.resetTargetOptions(Fn);
  // Reset OptLevel to None for optnone functions.
  CodeGenOpt::Level NewOptLevel = OptLevel;
  if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
    NewOptLevel = CodeGenOpt::None;
  OptLevelChanger OLC(*this, NewOptLevel);

  TII = MF->getSubtarget().getInstrInfo();
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
  ORE = make_unique<OptimizationRemarkEmitter>(&Fn);
  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");

  SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);

  CurDAG->init(*MF, *ORE, this, LibInfo,
               getAnalysisIfAvailable<DivergenceAnalysis>());
  FuncInfo->set(Fn, *MF, CurDAG);

  // Now get the optional analyses if we want to.
  // This is based on the possibly changed OptLevel (after optnone is taken
  // into account). That's unfortunate but OK because it just means we won't
  // ask for passes that have been required anyway.

  if (UseMBPI && OptLevel != CodeGenOpt::None)
    FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  else
    FuncInfo->BPI = nullptr;

  if (OptLevel != CodeGenOpt::None)
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  else
    AA = nullptr;

  SDB->init(GFI, AA, LibInfo);

  MF->setHasInlineAsm(false);

  FuncInfo->SplitCSR = false;

  // We split CSR if the target supports it for the given function
  // and the function has only return exits.
  if (OptLevel != CodeGenOpt::None && TLI->supportSplitCSR(MF)) {
    FuncInfo->SplitCSR = true;

    // Collect all the return blocks.
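    // SplitCSR survives this scan only if every successor-less block
    // terminates in a return or an unreachable.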
    for (const BasicBlock &BB : Fn) {
      if (!succ_empty(&BB))
        continue;

      const TerminatorInst *Term = BB.getTerminator();
      if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
        continue;

      // Bail out: this exit block is neither a return nor an unreachable.
      FuncInfo->SplitCSR = false;
      break;
    }
  }

  MachineBasicBlock *EntryMBB = &MF->front();
  if (FuncInfo->SplitCSR)
    // This performs initialization so lowering for SplitCSR will be correct.
    TLI->initializeSplitCSR(EntryMBB);

  SelectAllBasicBlocks(Fn);
  if (FastISelFailed && EnableFastISelFallbackReport) {
    DiagnosticInfoISelFallback DiagFallback(Fn);
    Fn.getContext().diagnose(DiagFallback);
  }

  // If the first basic block in the function has live ins that need to be
  // copied into vregs, emit the copies into the top of the block before
  // emitting the code for the block.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);

  // Insert copies in the entry block and the return blocks.
  if (FuncInfo->SplitCSR) {
    SmallVector<MachineBasicBlock*, 4> Returns;
    // Collect all the return blocks.
    for (MachineBasicBlock &MBB : mf) {
      if (!MBB.succ_empty())
        continue;

      MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
      if (Term != MBB.end() && Term->isReturn()) {
        Returns.push_back(&MBB);
        continue;
      }
    }
    TLI->insertCopiesSplitCSR(EntryMBB, Returns);
  }

  DenseMap<unsigned, unsigned> LiveInMap;
  if (!FuncInfo->ArgDbgValues.empty())
    for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
      if (LI.second)
        LiveInMap.insert(LI);

  // Insert DBG_VALUE instructions for function arguments to the entry block.
  for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
    MachineInstr *MI = FuncInfo->ArgDbgValues[e - i - 1];
    bool hasFI = MI->getOperand(0).isFI();
    unsigned Reg =
        hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      EntryMBB->insert(EntryMBB->begin(), MI);
    else {
      MachineInstr *Def = RegInfo->getVRegDef(Reg);
      if (Def) {
        MachineBasicBlock::iterator InsertPos = Def;
        // FIXME: VR def may not be in entry block.
        Def->getParent()->insert(std::next(InsertPos), MI);
      } else
        DEBUG(dbgs() << "Dropping debug info for dead vreg"
                     << TargetRegisterInfo::virtReg2Index(Reg) << "\n");
    }

    // If Reg is live-in then update debug info to track its copy in a vreg.
    DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
    if (LDI != LiveInMap.end()) {
      assert(!hasFI && "There's no handling of frame pointer updating here yet "
                       "- add if needed");
      MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
      MachineBasicBlock::iterator InsertPos = Def;
      const MDNode *Variable = MI->getDebugVariable();
      const MDNode *Expr = MI->getDebugExpression();
      DebugLoc DL = MI->getDebugLoc();
      bool IsIndirect = MI->isIndirectDebugValue();
      if (IsIndirect)
        assert(MI->getOperand(1).getImm() == 0 &&
               "DBG_VALUE with nonzero offset");
      assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
             "Expected inlined-at fields to agree");
      // Def is never a terminator here, so it is ok to increment InsertPos.
      BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
              IsIndirect, LDI->second, Variable, Expr);

      // If this vreg is directly copied into an exported register then that
      // COPY instruction also needs a DBG_VALUE, if it is the only user of
      // LDI->second.
      MachineInstr *CopyUseMI = nullptr;
      for (MachineRegisterInfo::use_instr_iterator
               UI = RegInfo->use_instr_begin(LDI->second),
               E = RegInfo->use_instr_end();
           UI != E;) {
        MachineInstr *UseMI = &*(UI++);
        if (UseMI->isDebugValue()) continue;
        if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
          CopyUseMI = UseMI; continue;
        }
        // Otherwise this is another use or second copy use.
        CopyUseMI = nullptr; break;
      }
      if (CopyUseMI) {
        // Use MI's debug location, which describes where Variable was
        // declared, rather than whatever is attached to CopyUseMI.
        MachineInstr *NewMI =
            BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
                    CopyUseMI->getOperand(0).getReg(), Variable, Expr);
        MachineBasicBlock::iterator Pos = CopyUseMI;
        EntryMBB->insertAfter(Pos, NewMI);
      }
    }
  }

  // Determine if there are any calls in this machine function.
  MachineFrameInfo &MFI = MF->getFrameInfo();
  for (const auto &MBB : *MF) {
    if (MFI.hasCalls() && MF->hasInlineAsm())
      break;

    for (const auto &MI : MBB) {
      const MCInstrDesc &MCID = TII->get(MI.getOpcode());
      if ((MCID.isCall() && !MCID.isReturn()) ||
          MI.isStackAligningInlineAsm()) {
        MFI.setHasCalls(true);
      }
      if (MI.isInlineAsm()) {
        MF->setHasInlineAsm(true);
      }
    }
  }

  // Determine if there is a call to setjmp in the machine function.
  MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());

  // Replace forward-declared registers with the registers containing
  // the desired value.
  MachineRegisterInfo &MRI = MF->getRegInfo();
  for (DenseMap<unsigned, unsigned>::iterator
           I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
       I != E; ++I) {
    unsigned From = I->first;
    unsigned To = I->second;
    // If To is also scheduled to be replaced, find what its ultimate
    // replacement is.
    while (true) {
      DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
      if (J == E) break;
      To = J->second;
    }
    // Make sure the new register has a sufficiently constrained register
    // class.
    if (TargetRegisterInfo::isVirtualRegister(From) &&
        TargetRegisterInfo::isVirtualRegister(To))
      MRI.constrainRegClass(To, MRI.getRegClass(From));
    // Replace it.

    // Replacing one register with another won't touch the kill flags.
    // We need to conservatively clear the kill flags as a kill on the old
    // register might dominate existing uses of the new register.
    if (!MRI.use_empty(To))
      MRI.clearKillFlags(From);
    MRI.replaceRegWith(From, To);
  }

  TLI->finalizeLowering(*MF);

  // Release function-specific state. SDB and CurDAG are already cleared
  // at this point.
  FuncInfo->clear();

  DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
  DEBUG(MF->print(dbgs()));

  return true;
}

static void reportFastISelFailure(MachineFunction &MF,
                                  OptimizationRemarkEmitter &ORE,
                                  OptimizationRemarkMissed &R,
                                  bool ShouldAbort) {
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || ShouldAbort)
    R << (" (in function: " + MF.getName() + ")").str();

  if (ShouldAbort)
    report_fatal_error(R.getMsg());

  ORE.emit(R);
}

void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
                                        BasicBlock::const_iterator End,
                                        bool &HadTailCall) {
  // Allow creating illegal types during DAG building for the basic block.
  CurDAG->NewNodesMustHaveLegalTypes = false;

  // Lower the instructions. If a call is emitted as a tail call, cease
  // emitting nodes for this block.
  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall;
       ++I) {
    if (!ElidedArgCopyInstrs.count(&*I))
      SDB->visit(*I);
  }

  // Make sure the root of the DAG is up-to-date.
  CurDAG->setRoot(SDB->getControlRoot());
  HadTailCall = SDB->HasTailCall;
  SDB->clear();

  // Final step, emit the lowered DAG as machine code.
  CodeGenAndEmitDAG();
}

void SelectionDAGISel::ComputeLiveOutVRegInfo() {
  SmallPtrSet<SDNode*, 16> VisitedNodes;
  SmallVector<SDNode*, 128> Worklist;

  Worklist.push_back(CurDAG->getRoot().getNode());

  KnownBits Known;

  do {
    SDNode *N = Worklist.pop_back_val();

    // If we've already seen this node, ignore it.
    if (!VisitedNodes.insert(N).second)
      continue;

    // Otherwise, add all chain operands to the worklist.
    for (const SDValue &Op : N->op_values())
      if (Op.getValueType() == MVT::Other)
        Worklist.push_back(Op.getNode());

    // If this is a CopyToReg with a vreg dest, process it.
    if (N->getOpcode() != ISD::CopyToReg)
      continue;

    unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(DestReg))
      continue;

    // Ignore non-scalar or non-integer values.
    SDValue Src = N->getOperand(2);
    EVT SrcVT = Src.getValueType();
    if (!SrcVT.isInteger() || SrcVT.isVector())
      continue;

    unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
    CurDAG->computeKnownBits(Src, Known);
    FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
  } while (!Worklist.empty());
}

void SelectionDAGISel::CodeGenAndEmitDAG() {
  StringRef GroupName = "sdag";
  StringRef GroupDescription = "Instruction Selection and Scheduling";
  std::string BlockName;
  int BlockNumber = -1;
  (void)BlockNumber;
  bool MatchFilterBB = false;
  (void)MatchFilterBB;
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*FuncInfo->Fn);

  // Pre-type legalization allows creation of any node types.
  CurDAG->NewNodesMustHaveLegalTypes = false;

#ifndef NDEBUG
  MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
                   FilterDAGBasicBlockName ==
                       FuncInfo->MBB->getBasicBlock()->getName().str());
#endif
#ifdef NDEBUG
  if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
      ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
      ViewSUnitDAGs)
#endif
  {
    BlockNumber = FuncInfo->MBB->getNumber();
    BlockName =
        (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
  }
  DEBUG(dbgs() << "Initial selection DAG: " << printMBBReference(*FuncInfo->MBB)
               << " '" << BlockName << "'\n";
        CurDAG->dump());

  if (ViewDAGCombine1 && MatchFilterBB)
    CurDAG->viewGraph("dag-combine1 input for " + BlockName);

  // Run the DAG combiner in pre-legalize mode.
  {
    NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
  }

  if (TTI.hasBranchDivergence())
    CurDAG->VerifyDAGDiverence();

  DEBUG(dbgs() << "Optimized lowered selection DAG: "
               << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
               << "'\n";
        CurDAG->dump());

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  if (ViewLegalizeTypesDAGs && MatchFilterBB)
    CurDAG->viewGraph("legalize-types input for " + BlockName);

  bool Changed;
  {
    NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    Changed = CurDAG->LegalizeTypes();
  }

  if (TTI.hasBranchDivergence())
    CurDAG->VerifyDAGDiverence();

  DEBUG(dbgs() << "Type-legalized selection DAG: "
               << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
               << "'\n";
        CurDAG->dump());

  // Only allow creation of legal node types.
  CurDAG->NewNodesMustHaveLegalTypes = true;

  if (Changed) {
    if (ViewDAGCombineLT && MatchFilterBB)
      CurDAG->viewGraph("dag-combine-lt input for " + BlockName);

    // Run the DAG combiner in post-type-legalize mode.
    {
      NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
                         GroupName, GroupDescription, TimePassesIsEnabled);
      CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
    }

    if (TTI.hasBranchDivergence())
      CurDAG->VerifyDAGDiverence();

    DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
                 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                 << "'\n";
          CurDAG->dump());
  }

  {
    NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    Changed = CurDAG->LegalizeVectors();
  }

  if (Changed) {
    DEBUG(dbgs() << "Vector-legalized selection DAG: "
                 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                 << "'\n";
          CurDAG->dump());

    {
      NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
                         GroupDescription, TimePassesIsEnabled);
      CurDAG->LegalizeTypes();
    }

    DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
                 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                 << "'\n";
          CurDAG->dump());

    if (ViewDAGCombineLT && MatchFilterBB)
      CurDAG->viewGraph("dag-combine-lv input for " + BlockName);

    // Run the DAG combiner in post-vector-legalize mode.
    {
      NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
                         GroupName, GroupDescription, TimePassesIsEnabled);
      CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
    }

    DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
                 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                 << "'\n";
          CurDAG->dump());

    if (TTI.hasBranchDivergence())
      CurDAG->VerifyDAGDiverence();
  }

  if (ViewLegalizeDAGs && MatchFilterBB)
    CurDAG->viewGraph("legalize input for " + BlockName);

  {
    NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    CurDAG->Legalize();
  }

  if (TTI.hasBranchDivergence())
    CurDAG->VerifyDAGDiverence();

  DEBUG(dbgs() << "Legalized selection DAG: "
               << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
               << "'\n";
        CurDAG->dump());

  if (ViewDAGCombine2 && MatchFilterBB)
    CurDAG->viewGraph("dag-combine2 input for " + BlockName);

  // Run the DAG combiner in post-legalize mode.
  {
    NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
  }

  if (TTI.hasBranchDivergence())
    CurDAG->VerifyDAGDiverence();

  DEBUG(dbgs() << "Optimized legalized selection DAG: "
               << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
               << "'\n";
        CurDAG->dump());

  if (OptLevel != CodeGenOpt::None)
    ComputeLiveOutVRegInfo();

  if (ViewISelDAGs && MatchFilterBB)
    CurDAG->viewGraph("isel input for " + BlockName);

  // Third, instruction select all of the operations to machine code, adding
  // the code to the MachineBasicBlock.
  {
    NamedRegionTimer T("isel", "Instruction Selection", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    DoInstructionSelection();
  }

  DEBUG(dbgs() << "Selected selection DAG: "
               << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
               << "'\n";
        CurDAG->dump());

  if (ViewSchedDAGs && MatchFilterBB)
    CurDAG->viewGraph("scheduler input for " + BlockName);

  // Schedule machine code.
  ScheduleDAGSDNodes *Scheduler = CreateScheduler();
  {
    NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    Scheduler->Run(CurDAG, FuncInfo->MBB);
  }

  if (ViewSUnitDAGs && MatchFilterBB)
    Scheduler->viewGraph();

  // Emit machine code to BB. This can change 'BB' to the last block being
  // inserted into.
  MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
  {
    NamedRegionTimer T("emit", "Instruction Creation", GroupName,
                       GroupDescription, TimePassesIsEnabled);

    // FuncInfo->InsertPt is passed by reference and set to the end of the
    // scheduled instructions.
    LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
  }

  // If the block was split, make sure we update any references that are used
  // to update PHI nodes later on.
  if (FirstMBB != LastMBB)
    SDB->UpdateSplitBlock(FirstMBB, LastMBB);

  // Free the scheduler state.
  {
    NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    delete Scheduler;
  }

  // Free the SelectionDAG state, now that we're finished with it.
  CurDAG->clear();
}

namespace {

/// ISelUpdater - helper class to handle updates of the instruction selection
/// graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
  SelectionDAG::allnodes_iterator &ISelPosition;

public:
  ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
      : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}

  /// NodeDeleted - Handle nodes deleted from the graph. If the node being
  /// deleted is the current ISelPosition node, update ISelPosition.
  ///
  void NodeDeleted(SDNode *N, SDNode *E) override {
    if (ISelPosition == SelectionDAG::allnodes_iterator(N))
      ++ISelPosition;
  }
};

} // end anonymous namespace

// This function is used to enforce the topological node id property
// leveraged during instruction selection. Before selection, all nodes are
// given a non-negative id such that all nodes have a larger id than their
// operands. As this holds transitively, we can prune checks that a node N
// is a predecessor of another node M by not recursively checking through
// M's operands if N's id is larger than M's id. This significantly improves
// the performance of various legality checks (e.g. IsLegalToFold /
// UpdateChains).
//
// However, when we fuse multiple nodes into a single node during selection,
// we may induce a predecessor relationship between inputs and outputs of
// distinct nodes being merged, violating the topological property. Should a
// fused node have a successor which has yet to be selected, our legality
// checks would be incorrect. To avoid this we mark all unselected successor
// nodes, i.e. those with id != -1, as invalid for pruning by bit-negating
// their ids (x => -(x+1)) and modify our pruning check to ignore negative
// ids of M. We use bit-negation to more clearly enforce that node id -1 can
// only be achieved by selected nodes (for example, id 5 becomes -6, and
// -(-6 + 1) == 5 recovers it). As the conversion is reversible, the original
// id, and with it topological pruning, can still be recovered when looking
// for unselected nodes. This method is called internally by all ISel
// replacement calls.
void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) {
  SmallVector<SDNode *, 4> Nodes;
  Nodes.push_back(Node);

  while (!Nodes.empty()) {
    SDNode *N = Nodes.pop_back_val();
    for (auto *U : N->uses()) {
      auto UId = U->getNodeId();
      if (UId > 0) {
        InvalidateNodeId(U);
        Nodes.push_back(U);
      }
    }
  }
}

// InvalidateNodeId - As discussed in EnforceNodeIdInvariant, mark a
// NodeId with the equivalent node id which is invalid for topological
// pruning.
void SelectionDAGISel::InvalidateNodeId(SDNode *N) {
  int InvalidId = -(N->getNodeId() + 1);
  N->setNodeId(InvalidId);
}

// getUninvalidatedNodeId - get original uninvalidated node id.
int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
  int Id = N->getNodeId();
  if (Id < -1)
    return -(Id + 1);
  return Id;
}

void SelectionDAGISel::DoInstructionSelection() {
  DEBUG(dbgs() << "===== Instruction selection begins: "
               << printMBBReference(*FuncInfo->MBB) << " '"
               << FuncInfo->MBB->getName() << "'\n");

  PreprocessISelDAG();

  // Select target instructions for the DAG.
  {
    // Number all nodes with a topological order and set DAGSize.
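    // (Afterwards every node has an id strictly greater than the ids of its
    // operands; this is the invariant that EnforceNodeIdInvariant preserves,
    // via bit-negation, as nodes are fused during selection.)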
    DAGSize = CurDAG->AssignTopologicalOrder();

    // Create a dummy node (which is not added to allnodes), that adds
    // a reference to the root node, preventing it from being deleted,
    // and tracking any changes of the root.
    HandleSDNode Dummy(CurDAG->getRoot());
    SelectionDAG::allnodes_iterator ISelPosition(CurDAG->getRoot().getNode());
    ++ISelPosition;

    // Make sure that ISelPosition gets properly updated when nodes are deleted
    // in calls made from this function.
    ISelUpdater ISU(*CurDAG, ISelPosition);

    // The AllNodes list is now topological-sorted. Visit the
    // nodes by starting at the end of the list (the root of the
    // graph) and proceeding back toward the beginning (the entry
    // node).
    while (ISelPosition != CurDAG->allnodes_begin()) {
      SDNode *Node = &*--ISelPosition;
      // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
      // but there are currently some corner cases that it misses. Also, this
      // makes it theoretically possible to disable the DAGCombiner.
      if (Node->use_empty())
        continue;

#ifndef NDEBUG
      SmallVector<SDNode *, 4> Nodes;
      Nodes.push_back(Node);

      while (!Nodes.empty()) {
        auto N = Nodes.pop_back_val();
        if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
          continue;
        for (const SDValue &Op : N->op_values()) {
          if (Op->getOpcode() == ISD::TokenFactor)
            Nodes.push_back(Op.getNode());
          else {
            // We rely on topological ordering of node ids for checking for
            // cycles when fusing nodes during selection. All unselected
            // successors of an already selected node should have a negative
            // id. This assertion will catch such cases. If this assertion
            // triggers it is likely that you are using DAG-level Value/Node
            // replacement functions (versus the equivalent ISEL replacement)
            // in backend-specific selections. See the comment in
            // EnforceNodeIdInvariant for more details.
            assert(Op->getNodeId() != -1 &&
                   "Node has already selected predecessor node");
          }
        }
      }
#endif

      // When we are using non-default rounding modes or FP exception behavior,
      // FP operations are represented by StrictFP pseudo-operations. They
      // need to be simplified here so that the target-specific instruction
      // selectors know how to handle them.
      //
      // If the current node is a strict FP pseudo-op, mutateStrictFPToFP()
      // below will provide the corresponding normal FP opcode to which the
      // node is mutated.
      //
      // FIXME: The backends need a way to handle FP constraints.
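      // (For example, an ISD::STRICT_FADD node is mutated here into a plain
      // ISD::FADD so that ordinary FADD patterns can select it.)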
      if (Node->isStrictFPOpcode())
        Node = CurDAG->mutateStrictFPToFP(Node);

      DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
            Node->dump(CurDAG));

      Select(Node);
    }

    CurDAG->setRoot(Dummy.getValue());
  }

  DEBUG(dbgs() << "\n===== Instruction selection ends:\n");

  PostprocessISelDAG();
}

static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
  for (const User *U : CPI->users()) {
    if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
      Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
      if (IID == Intrinsic::eh_exceptionpointer ||
          IID == Intrinsic::eh_exceptioncode)
        return true;
    }
  }
  return false;
}

/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
bool SelectionDAGISel::PrepareEHLandingPad() {
  MachineBasicBlock *MBB = FuncInfo->MBB;
  const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
  const BasicBlock *LLVMBB = MBB->getBasicBlock();
  const TargetRegisterClass *PtrRC =
      TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));

  // Catchpads have one live-in register, which typically holds the exception
  // pointer or code.
  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
    if (hasExceptionPointerOrCodeUser(CPI)) {
      // Get or create the virtual register to hold the pointer or code. Mark
      // the live in physreg and copy into the vreg.
      MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
      assert(EHPhysReg && "target lacks exception pointer register");
      MBB->addLiveIn(EHPhysReg);
      unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
      BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
              TII->get(TargetOpcode::COPY), VReg)
          .addReg(EHPhysReg, RegState::Kill);
    }
    return true;
  }

  if (!LLVMBB->isLandingPad())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MCSymbol *Label = MF->addLandingPad(MBB);

  // Assign the call site to the landing pad's begin label.
  MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);

  const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
      .addSym(Label);

  // Mark exception register as live in.
  if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
    FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);

  // Mark exception selector register as live in.
  if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
    FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);

  return true;
}

/// isFoldedOrDeadInstruction - Return true if the specified instruction is
/// side-effect free and is either dead or folded into a generated instruction.
/// Return false if it needs to be emitted.
static bool isFoldedOrDeadInstruction(const Instruction *I,
                                      FunctionLoweringInfo *FuncInfo) {
  return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
         !isa<TerminatorInst>(I) &&   // Terminators aren't folded.
         !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
         !I->isEHPad() &&             // EH pad instructions aren't folded.
         !FuncInfo->isExportedInst(I); // Exported instrs must be computed.
}

/// Set up SwiftErrorVals by going through the function. If the function has
/// a swifterror argument, it will be the first entry.
static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
                                FunctionLoweringInfo *FuncInfo) {
  if (!TLI->supportSwiftError())
    return;

  FuncInfo->SwiftErrorVals.clear();
  FuncInfo->SwiftErrorVRegDefMap.clear();
  FuncInfo->SwiftErrorVRegUpwardsUse.clear();
  FuncInfo->SwiftErrorVRegDefUses.clear();
  FuncInfo->SwiftErrorArg = nullptr;

  // Check if function has a swifterror argument.
  bool HaveSeenSwiftErrorArg = false;
  for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end();
       AI != AE; ++AI)
    if (AI->hasSwiftErrorAttr()) {
      assert(!HaveSeenSwiftErrorArg &&
             "Must have only one swifterror parameter");
      (void)HaveSeenSwiftErrorArg; // silence warning.
      HaveSeenSwiftErrorArg = true;
      FuncInfo->SwiftErrorArg = &*AI;
      FuncInfo->SwiftErrorVals.push_back(&*AI);
    }

  for (const auto &LLVMBB : Fn)
    for (const auto &Inst : LLVMBB) {
      if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
        if (Alloca->isSwiftError())
          FuncInfo->SwiftErrorVals.push_back(Alloca);
    }
}

static void createSwiftErrorEntriesInEntryBlock(FunctionLoweringInfo *FuncInfo,
                                                FastISel *FastIS,
                                                const TargetLowering *TLI,
                                                const TargetInstrInfo *TII,
                                                SelectionDAGBuilder *SDB) {
  if (!TLI->supportSwiftError())
    return;

  // We only need to do this when we have a swifterror parameter or a
  // swifterror alloca.
  if (FuncInfo->SwiftErrorVals.empty())
    return;

  assert(FuncInfo->MBB == &*FuncInfo->MF->begin() &&
         "expected to insert into entry block");
  auto &DL = FuncInfo->MF->getDataLayout();
  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
  for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
    // We will always generate a copy from the argument. It is always used at
    // least by the 'return' of the swifterror.
    if (FuncInfo->SwiftErrorArg && FuncInfo->SwiftErrorArg == SwiftErrorVal)
      continue;
    unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
    // Assign Undef to Vreg. We construct MI directly to make sure it works
    // with FastISel.
    BuildMI(*FuncInfo->MBB, FuncInfo->MBB->getFirstNonPHI(),
            SDB->getCurDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF),
            VReg);

    // Keep FastIS informed about the value we just inserted.
    if (FastIS)
      FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));

    FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorVal, VReg);
  }
}

/// Collect llvm.dbg.declare information. This is done after argument lowering
/// in case the declarations refer to arguments.
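/// A typical case (illustrative IR) is a declare addressing a static alloca:
///
///   %x.addr = alloca i32
///   call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !var,
///                               metadata !DIExpression())
///
/// Such declares are recorded below as frame-index-based variable debug info;
/// anything else is ignored here and handled during isel like dbg.value.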
static void processDbgDeclares(FunctionLoweringInfo *FuncInfo) {
  MachineFunction *MF = FuncInfo->MF;
  const DataLayout &DL = MF->getDataLayout();
  for (const BasicBlock &BB : *FuncInfo->Fn) {
    for (const Instruction &I : BB) {
      const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(&I);
      if (!DI)
        continue;

      assert(DI->getVariable() && "Missing variable");
      assert(DI->getDebugLoc() && "Missing location");
      const Value *Address = DI->getAddress();
      if (!Address)
        continue;

      // Look through casts and constant offset GEPs. These mostly come from
      // inalloca.
      APInt Offset(DL.getTypeSizeInBits(Address->getType()), 0);
      Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      // Check if the variable is a static alloca or a byval or inalloca
      // argument passed in memory. If it is not, then we will ignore this
      // intrinsic and handle this during isel like dbg.value.
      int FI = std::numeric_limits<int>::max();
      if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
        auto SI = FuncInfo->StaticAllocaMap.find(AI);
        if (SI != FuncInfo->StaticAllocaMap.end())
          FI = SI->second;
      } else if (const auto *Arg = dyn_cast<Argument>(Address))
        FI = FuncInfo->getArgumentFrameIndex(Arg);

      if (FI == std::numeric_limits<int>::max())
        continue;

      DIExpression *Expr = DI->getExpression();
      if (Offset.getBoolValue())
        Expr = DIExpression::prepend(Expr, DIExpression::NoDeref,
                                     Offset.getZExtValue());
      MF->setVariableDbgInfo(DI->getVariable(), Expr, FI, DI->getDebugLoc());
    }
  }
}

/// Propagate swifterror values through the machine function CFG.
static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo) {
  auto *TLI = FuncInfo->TLI;
  if (!TLI->supportSwiftError())
    return;

  // We only need to do this when we have a swifterror parameter or a
  // swifterror alloca.
  if (FuncInfo->SwiftErrorVals.empty())
    return;

  // For each machine basic block in reverse post order.
  ReversePostOrderTraversal<MachineFunction *> RPOT(FuncInfo->MF);
  for (MachineBasicBlock *MBB : RPOT) {
    // For each swifterror value in the function.
    for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
      auto Key = std::make_pair(MBB, SwiftErrorVal);
      auto UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
      auto VRegDefIt = FuncInfo->SwiftErrorVRegDefMap.find(Key);
      bool UpwardsUse = UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end();
      unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
      bool DownwardDef = VRegDefIt != FuncInfo->SwiftErrorVRegDefMap.end();
      assert(!(UpwardsUse && !DownwardDef) &&
             "We can't have an upwards use but no downwards def");

      // If there is no upwards exposed use and an entry for the swifterror in
      // the def map for this value we don't need to do anything: We already
      // have a downward def for this basic block.
      if (!UpwardsUse && DownwardDef)
        continue;

      // Otherwise we either have an upwards exposed use vreg that we need to
      // materialize or need to forward the downward def from predecessors.

      // Check whether we have a single vreg def from all predecessors.
      // Otherwise we need a phi.
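      // (One (predecessor, vreg) pair is gathered per unique predecessor; a
      // self-edge makes the block its own predecessor and, as handled below,
      // turns the phi into an upwards use of its own result.)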
      SmallVector<std::pair<MachineBasicBlock *, unsigned>, 4> VRegs;
      SmallSet<const MachineBasicBlock *, 8> Visited;
      for (auto *Pred : MBB->predecessors()) {
        if (!Visited.insert(Pred).second)
          continue;
        VRegs.push_back(std::make_pair(
            Pred, FuncInfo->getOrCreateSwiftErrorVReg(Pred, SwiftErrorVal)));
        if (Pred != MBB)
          continue;
        // We have a self-edge.
        // If there was no upwards use in this basic block there is now one:
        // the phi needs to use itself.
        if (!UpwardsUse) {
          UpwardsUse = true;
          UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
          assert(UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end());
          UUseVReg = UUseIt->second;
        }
      }

      // We need a phi node if we have more than one predecessor with different
      // downward defs.
      bool needPHI =
          VRegs.size() >= 1 &&
          std::find_if(
              VRegs.begin(), VRegs.end(),
              [&](const std::pair<const MachineBasicBlock *, unsigned> &V)
                  -> bool { return V.second != VRegs[0].second; }) !=
              VRegs.end();

      // If there is no upwards exposed use and we don't need a phi, just
      // forward the swifterror vreg from the predecessor(s).
      if (!UpwardsUse && !needPHI) {
        assert(!VRegs.empty() &&
               "No predecessors? The entry block should bail out earlier");
        // Just forward the swifterror vreg from the predecessor(s).
        FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, VRegs[0].second);
        continue;
      }

      auto DLoc = isa<Instruction>(SwiftErrorVal)
                      ? cast<Instruction>(SwiftErrorVal)->getDebugLoc()
                      : DebugLoc();
      const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();

      // If we don't need a phi, create a copy to the upward exposed vreg.
      if (!needPHI) {
        assert(UpwardsUse);
        assert(!VRegs.empty() &&
               "No predecessors? Is the Calling Convention correct?");
        unsigned DestReg = UUseVReg;
        BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
                DestReg)
            .addReg(VRegs[0].second);
        continue;
      }

      // We need a phi: if there is an upwards exposed use we already have a
      // destination virtual register number, otherwise we generate a new one.
      auto &DL = FuncInfo->MF->getDataLayout();
      auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
      unsigned PHIVReg =
          UpwardsUse ? UUseVReg
                     : FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
      MachineInstrBuilder SwiftErrorPHI =
          BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
                  TII->get(TargetOpcode::PHI), PHIVReg);
      for (auto BBRegPair : VRegs) {
        SwiftErrorPHI.addReg(BBRegPair.second).addMBB(BBRegPair.first);
      }

      // We did not have a definition in this block before: store the phi's
      // vreg as this block's downward exposed def.
      if (!UpwardsUse)
        FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, PHIVReg);
    }
  }
}

static void preassignSwiftErrorRegs(const TargetLowering *TLI,
                                    FunctionLoweringInfo *FuncInfo,
                                    BasicBlock::const_iterator Begin,
                                    BasicBlock::const_iterator End) {
  if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
    return;

  // Iterate over instructions and assign vregs to swifterror defs and uses.
  for (auto It = Begin; It != End; ++It) {
    ImmutableCallSite CS(&*It);
    if (CS) {
      // A call-site with a swifterror argument is both use and def.
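      // E.g. (illustrative IR):
      //   call void @f(i8** swifterror %err)
      // reads the current swifterror value (a use) and leaves a new value in
      // it on return (a def), so vregs are assigned for both below.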
      const Value *SwiftErrorAddr = nullptr;
      for (auto &Arg : CS.args()) {
        if (!Arg->isSwiftError())
          continue;
        // Use of swifterror.
        assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
        SwiftErrorAddr = &*Arg;
        assert(SwiftErrorAddr->isSwiftError() &&
               "Must have a swifterror value argument");
        unsigned VReg; bool CreatedReg;
        std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
            &*It, FuncInfo->MBB, SwiftErrorAddr);
        assert(CreatedReg);
      }
      if (!SwiftErrorAddr)
        continue;

      // Def of swifterror.
      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) =
          FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
      assert(CreatedReg);
      FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);

    // A load is a use.
    } else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
      const Value *V = LI->getOperand(0);
      if (!V->isSwiftError())
        continue;

      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) =
          FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
      assert(CreatedReg);

    // A store is a def.
    } else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
      const Value *SwiftErrorAddr = SI->getOperand(1);
      if (!SwiftErrorAddr->isSwiftError())
        continue;

      // Def of swifterror.
      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) =
          FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
      assert(CreatedReg);
      FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);

    // A return in a swifterror returning function is a use.
    } else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
      const Function *F = R->getParent()->getParent();
      if (!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
        continue;

      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
          R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
      assert(CreatedReg);
    }
  }
}

void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
  FastISelFailed = false;
  // Initialize the Fast-ISel state, if needed.
  FastISel *FastIS = nullptr;
  if (TM.Options.EnableFastISel) {
    DEBUG(dbgs() << "Enabling fast-isel\n");
    FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
  }

  setupSwiftErrorVals(Fn, TLI, FuncInfo);

  ReversePostOrderTraversal<const Function*> RPOT(&Fn);

  // Lower arguments up front. An RPO iteration always visits the entry block
  // first.
  assert(*RPOT.begin() == &Fn.getEntryBlock());
  ++NumEntryBlocks;

  // Set up FuncInfo for ISel. Entry blocks never have PHIs.
  FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
  FuncInfo->InsertPt = FuncInfo->MBB->begin();

  CurDAG->setFunctionLoweringInfo(FuncInfo);

  if (!FastIS) {
    LowerArguments(Fn);
  } else {
    // See if fast isel can lower the arguments.
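    // If it cannot, we fall back to SelectionDAG argument lowering for the
    // entry block; the failure is reported via reportFastISelFailure below.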
1512 FastIS->startNewBlock(); 1513 if (!FastIS->lowerArguments()) { 1514 FastISelFailed = true; 1515 // Fast isel failed to lower these arguments 1516 ++NumFastIselFailLowerArguments; 1517 1518 OptimizationRemarkMissed R("sdagisel", "FastISelFailure", 1519 Fn.getSubprogram(), 1520 &Fn.getEntryBlock()); 1521 R << "FastISel didn't lower all arguments: " 1522 << ore::NV("Prototype", Fn.getType()); 1523 reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 1); 1524 1525 // Use SelectionDAG argument lowering 1526 LowerArguments(Fn); 1527 CurDAG->setRoot(SDB->getControlRoot()); 1528 SDB->clear(); 1529 CodeGenAndEmitDAG(); 1530 } 1531 1532 // If we inserted any instructions at the beginning, make a note of 1533 // where they are, so we can be sure to emit subsequent instructions 1534 // after them. 1535 if (FuncInfo->InsertPt != FuncInfo->MBB->begin()) 1536 FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt)); 1537 else 1538 FastIS->setLastLocalValue(nullptr); 1539 } 1540 createSwiftErrorEntriesInEntryBlock(FuncInfo, FastIS, TLI, TII, SDB); 1541 1542 processDbgDeclares(FuncInfo); 1543 1544 // Iterate over all basic blocks in the function. 1545 for (const BasicBlock *LLVMBB : RPOT) { 1546 if (OptLevel != CodeGenOpt::None) { 1547 bool AllPredsVisited = true; 1548 for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB); 1549 PI != PE; ++PI) { 1550 if (!FuncInfo->VisitedBBs.count(*PI)) { 1551 AllPredsVisited = false; 1552 break; 1553 } 1554 } 1555 1556 if (AllPredsVisited) { 1557 for (const PHINode &PN : LLVMBB->phis()) 1558 FuncInfo->ComputePHILiveOutRegInfo(&PN); 1559 } else { 1560 for (const PHINode &PN : LLVMBB->phis()) 1561 FuncInfo->InvalidatePHILiveOutRegInfo(&PN); 1562 } 1563 1564 FuncInfo->VisitedBBs.insert(LLVMBB); 1565 } 1566 1567 BasicBlock::const_iterator const Begin = 1568 LLVMBB->getFirstNonPHI()->getIterator(); 1569 BasicBlock::const_iterator const End = LLVMBB->end(); 1570 BasicBlock::const_iterator BI = End; 1571 1572 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB]; 1573 if (!FuncInfo->MBB) 1574 continue; // Some blocks like catchpads have no code or MBB. 1575 1576 // Insert new instructions after any phi or argument setup code. 1577 FuncInfo->InsertPt = FuncInfo->MBB->end(); 1578 1579 // Setup an EH landing-pad block. 1580 FuncInfo->ExceptionPointerVirtReg = 0; 1581 FuncInfo->ExceptionSelectorVirtReg = 0; 1582 if (LLVMBB->isEHPad()) 1583 if (!PrepareEHLandingPad()) 1584 continue; 1585 1586 // Before doing SelectionDAG ISel, see if FastISel has been requested. 1587 if (FastIS) { 1588 if (LLVMBB != &Fn.getEntryBlock()) 1589 FastIS->startNewBlock(); 1590 1591 unsigned NumFastIselRemaining = std::distance(Begin, End); 1592 1593 // Pre-assign swifterror vregs. 1594 preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End); 1595 1596 // Do FastISel on as many instructions as possible. 1597 for (; BI != Begin; --BI) { 1598 const Instruction *Inst = &*std::prev(BI); 1599 1600 // If we no longer require this instruction, skip it. 1601 if (isFoldedOrDeadInstruction(Inst, FuncInfo) || 1602 ElidedArgCopyInstrs.count(Inst)) { 1603 --NumFastIselRemaining; 1604 continue; 1605 } 1606 1607 // Bottom-up: reset the insert pos at the top, after any local-value 1608 // instructions. 1609 FastIS->recomputeInsertPt(); 1610 1611 // Try to select the instruction with FastISel. 
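        // selectInstruction returns true on success; on failure we fall
        // through to the remark/abort handling below and, for calls, hand a
        // single-instruction slice of the block to SelectionDAG.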
1612 if (FastIS->selectInstruction(Inst)) { 1613 --NumFastIselRemaining; 1614 ++NumFastIselSuccess; 1615 // If fast isel succeeded, skip over all the folded instructions, and 1616 // then see if there is a load right before the selected instructions. 1617 // Try to fold the load if so. 1618 const Instruction *BeforeInst = Inst; 1619 while (BeforeInst != &*Begin) { 1620 BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst)); 1621 if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo)) 1622 break; 1623 } 1624 if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) && 1625 BeforeInst->hasOneUse() && 1626 FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) { 1627 // If we succeeded, don't re-select the load. 1628 BI = std::next(BasicBlock::const_iterator(BeforeInst)); 1629 --NumFastIselRemaining; 1630 ++NumFastIselSuccess; 1631 } 1632 continue; 1633 } 1634 1635 FastISelFailed = true; 1636 1637 // Then handle certain instructions as single-LLVM-Instruction blocks. 1638 // We cannot separate out GCrelocates to their own blocks since we need 1639 // to keep track of gc-relocates for a particular gc-statepoint. This is 1640 // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before 1641 // visitGCRelocate. 1642 if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst)) { 1643 OptimizationRemarkMissed R("sdagisel", "FastISelFailure", 1644 Inst->getDebugLoc(), LLVMBB); 1645 1646 R << "FastISel missed call"; 1647 1648 if (R.isEnabled() || EnableFastISelAbort) { 1649 std::string InstStrStorage; 1650 raw_string_ostream InstStr(InstStrStorage); 1651 InstStr << *Inst; 1652 1653 R << ": " << InstStr.str(); 1654 } 1655 1656 reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2); 1657 1658 if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() && 1659 !Inst->use_empty()) { 1660 unsigned &R = FuncInfo->ValueMap[Inst]; 1661 if (!R) 1662 R = FuncInfo->CreateRegs(Inst->getType()); 1663 } 1664 1665 bool HadTailCall = false; 1666 MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt; 1667 SelectBasicBlock(Inst->getIterator(), BI, HadTailCall); 1668 1669 // If the call was emitted as a tail call, we're done with the block. 1670 // We also need to delete any previously emitted instructions. 1671 if (HadTailCall) { 1672 FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end()); 1673 --BI; 1674 break; 1675 } 1676 1677 // Recompute NumFastIselRemaining as Selection DAG instruction 1678 // selection may have handled the call, input args, etc. 1679 unsigned RemainingNow = std::distance(Begin, BI); 1680 NumFastIselFailures += NumFastIselRemaining - RemainingNow; 1681 NumFastIselRemaining = RemainingNow; 1682 continue; 1683 } 1684 1685 OptimizationRemarkMissed R("sdagisel", "FastISelFailure", 1686 Inst->getDebugLoc(), LLVMBB); 1687 1688 bool ShouldAbort = EnableFastISelAbort; 1689 if (isa<TerminatorInst>(Inst)) { 1690 // Use a different message for terminator misses. 
1691           R << "FastISel missed terminator";
1692           // Don't abort for terminator unless the level is really high.
1693           ShouldAbort = (EnableFastISelAbort > 2);
1694         } else {
1695           R << "FastISel missed";
1696         }
1697
1698         if (R.isEnabled() || EnableFastISelAbort) {
1699           std::string InstStrStorage;
1700           raw_string_ostream InstStr(InstStrStorage);
1701           InstStr << *Inst;
1702           R << ": " << InstStr.str();
1703         }
1704
1705         reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
1706
1707         NumFastIselFailures += NumFastIselRemaining;
1708         break;
1709       }
1710
1711       FastIS->recomputeInsertPt();
1712     }
1713
1714     if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB)) {
1715       bool FunctionBasedInstrumentation =
1716           TLI->getSSPStackGuardCheck(*Fn.getParent());
1717       SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
1718                                    FunctionBasedInstrumentation);
1719     }
1720
1721     if (Begin != BI)
1722       ++NumDAGBlocks;
1723     else
1724       ++NumFastIselBlocks;
1725
1726     if (Begin != BI) {
1727       // Run SelectionDAG instruction selection on the remainder of the block
1728       // not handled by FastISel. If FastISel is not run, this is the entire
1729       // block.
1730       bool HadTailCall;
1731       SelectBasicBlock(Begin, BI, HadTailCall);
1732
1733       // But if FastISel was run, we already selected some of the block.
1734       // If we emitted a tail-call, we need to delete any previously emitted
1735       // instruction that follows it.
1736       if (HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1737         FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
1738     }
1739
1740     if (FastIS)
1741       FastIS->finishBasicBlock();
1742     FinishBasicBlock();
1743     FuncInfo->PHINodesToUpdate.clear();
1744     ElidedArgCopyInstrs.clear();
1745   }
1746
1747   propagateSwiftErrorVRegs(FuncInfo);
1748
1749   delete FastIS;
1750   SDB->clearDanglingDebugInfo();
1751   SDB->SPDescriptor.resetPerFunctionState();
1752 }
1753
1754 /// Given that the input MI is before a partial terminator sequence TSeq, return
1755 /// true if MI + TSeq is also a partial terminator sequence.
1756 ///
1757 /// A terminator sequence is a sequence of MachineInstrs which at this point in
1758 /// lowering copy vregs into physical registers, which are then passed into
1759 /// terminator instructions so we can satisfy ABI constraints. A partial
1760 /// terminator sequence is an improper subset of a terminator sequence (i.e. it
1761 /// may be the whole terminator sequence).
1762 static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
1763   // If we do not have a copy or an implicit def, we return true if and only if
1764   // MI is a debug value.
1765   if (!MI.isCopy() && !MI.isImplicitDef())
1766     // Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to the
1767     // physical registers if there is debug info associated with the terminator
1768     // of our mbb. We want to include said debug info in our terminator
1769     // sequence, so we return true in that case.
1770     return MI.isDebugValue();
1771
1772   // We have left the terminator sequence if we are not doing one of the
1773   // following:
1774   //
1775   // 1. Copying a vreg into a physical register.
1776   // 2. Copying a vreg into a vreg.
1777   // 3. Defining a register via an implicit def.
1778
1779   // OPI should always be a register definition...
1780   MachineInstr::const_mop_iterator OPI = MI.operands_begin();
1781   if (!OPI->isReg() || !OPI->isDef())
1782     return false;
1783
1784   // Defining any register via an implicit def is always ok.
1785   if (MI.isImplicitDef())
1786     return true;
1787
1788   // Grab the copy source...
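  // A COPY is laid out as "%dst = COPY %src", so the source is the operand
  // immediately after the definition we just checked.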
1789   MachineInstr::const_mop_iterator OPI2 = OPI;
1790   ++OPI2;
1791   assert(OPI2 != MI.operands_end()
1792          && "Should have a copy implying we should have 2 arguments.");
1793
1794   // Make sure that the copy dest is not a vreg when the copy source is a
1795   // physical register.
1796   if (!OPI2->isReg() ||
1797       (!TargetRegisterInfo::isPhysicalRegister(OPI->getReg()) &&
1798        TargetRegisterInfo::isPhysicalRegister(OPI2->getReg())))
1799     return false;
1800
1801   return true;
1802 }
1803
1804 /// Find the split point at which to splice the end of BB into its stack
1805 /// protector check success machine basic block.
1806 ///
1807 /// On many platforms, due to ABI constraints, terminators, even before register
1808 /// allocation, use physical registers. This creates an issue for us since
1809 /// physical registers at this point cannot travel across basic
1810 /// blocks. Luckily, SelectionDAG always moves physical registers into vregs
1811 /// when they enter functions and moves them through a sequence of copies back
1812 /// into the physical registers right before the terminator, creating a
1813 /// ``Terminator Sequence''. This function searches for the beginning of the
1814 /// terminator sequence so that we can ensure that we splice off not just the
1815 /// terminator, but additionally the copies that move the vregs into the
1816 /// physical registers.
1817 static MachineBasicBlock::iterator
1818 FindSplitPointForStackProtector(MachineBasicBlock *BB) {
1819   MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
1821   if (SplitPoint == BB->begin())
1822     return SplitPoint;
1823
1824   MachineBasicBlock::iterator Start = BB->begin();
1825   MachineBasicBlock::iterator Previous = SplitPoint;
1826   --Previous;
1827
1828   while (MIIsInTerminatorSequence(*Previous)) {
1829     SplitPoint = Previous;
1830     if (Previous == Start)
1831       break;
1832     --Previous;
1833   }
1834
1835   return SplitPoint;
1836 }
1837
1838 void
1839 SelectionDAGISel::FinishBasicBlock() {
1840   DEBUG(dbgs() << "Total amount of phi nodes to update: "
1841                << FuncInfo->PHINodesToUpdate.size() << "\n";
1842         for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
1843           dbgs() << "Node " << i << " : ("
1844                  << FuncInfo->PHINodesToUpdate[i].first
1845                  << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1846
1847   // Next, now that we know which MBB the LLVM BB was last expanded into,
1848   // update PHI nodes in successors.
1849   for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1850     MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1851     assert(PHI->isPHI() &&
1852            "This is not a machine PHI node that we are updating!");
1853     if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1854       continue;
1855     PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
1856   }
1857
1858   // Handle stack protector.
1859   if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1860     // The target provides a guard check function. There is no need to
1861     // generate error handling code or to split the current basic block.
1862     MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1863
1864     // Add load and check to the basic block.
1865     FuncInfo->MBB = ParentMBB;
1866     FuncInfo->InsertPt =
1867         FindSplitPointForStackProtector(ParentMBB);
1868     SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1869     CurDAG->setRoot(SDB->getRoot());
1870     SDB->clear();
1871     CodeGenAndEmitDAG();
1872
1873     // Clear the Per-BB State.
1874 SDB->SPDescriptor.resetPerBBState(); 1875 } else if (SDB->SPDescriptor.shouldEmitStackProtector()) { 1876 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB(); 1877 MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB(); 1878 1879 // Find the split point to split the parent mbb. At the same time copy all 1880 // physical registers used in the tail of parent mbb into virtual registers 1881 // before the split point and back into physical registers after the split 1882 // point. This prevents us needing to deal with Live-ins and many other 1883 // register allocation issues caused by us splitting the parent mbb. The 1884 // register allocator will clean up said virtual copies later on. 1885 MachineBasicBlock::iterator SplitPoint = 1886 FindSplitPointForStackProtector(ParentMBB); 1887 1888 // Splice the terminator of ParentMBB into SuccessMBB. 1889 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, 1890 SplitPoint, 1891 ParentMBB->end()); 1892 1893 // Add compare/jump on neq/jump to the parent BB. 1894 FuncInfo->MBB = ParentMBB; 1895 FuncInfo->InsertPt = ParentMBB->end(); 1896 SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB); 1897 CurDAG->setRoot(SDB->getRoot()); 1898 SDB->clear(); 1899 CodeGenAndEmitDAG(); 1900 1901 // CodeGen Failure MBB if we have not codegened it yet. 1902 MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB(); 1903 if (FailureMBB->empty()) { 1904 FuncInfo->MBB = FailureMBB; 1905 FuncInfo->InsertPt = FailureMBB->end(); 1906 SDB->visitSPDescriptorFailure(SDB->SPDescriptor); 1907 CurDAG->setRoot(SDB->getRoot()); 1908 SDB->clear(); 1909 CodeGenAndEmitDAG(); 1910 } 1911 1912 // Clear the Per-BB State. 1913 SDB->SPDescriptor.resetPerBBState(); 1914 } 1915 1916 // Lower each BitTestBlock. 1917 for (auto &BTB : SDB->BitTestCases) { 1918 // Lower header first, if it wasn't already lowered 1919 if (!BTB.Emitted) { 1920 // Set the current basic block to the mbb we wish to insert the code into 1921 FuncInfo->MBB = BTB.Parent; 1922 FuncInfo->InsertPt = FuncInfo->MBB->end(); 1923 // Emit the code 1924 SDB->visitBitTestHeader(BTB, FuncInfo->MBB); 1925 CurDAG->setRoot(SDB->getRoot()); 1926 SDB->clear(); 1927 CodeGenAndEmitDAG(); 1928 } 1929 1930 BranchProbability UnhandledProb = BTB.Prob; 1931 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) { 1932 UnhandledProb -= BTB.Cases[j].ExtraProb; 1933 // Set the current basic block to the mbb we wish to insert the code into 1934 FuncInfo->MBB = BTB.Cases[j].ThisBB; 1935 FuncInfo->InsertPt = FuncInfo->MBB->end(); 1936 // Emit the code 1937 1938 // If all cases cover a contiguous range, it is not necessary to jump to 1939 // the default block after the last bit test fails. This is because the 1940 // range check during bit test header creation has guaranteed that every 1941 // case here doesn't go outside the range. In this case, there is no need 1942 // to perform the last bit test, as it will always be true. Instead, make 1943 // the second-to-last bit-test fall through to the target of the last bit 1944 // test, and delete the last bit test. 1945 1946 MachineBasicBlock *NextMBB; 1947 if (BTB.ContiguousRange && j + 2 == ej) { 1948 // Second-to-last bit-test with contiguous range: fall through to the 1949 // target of the final bit test. 1950 NextMBB = BTB.Cases[j + 1].TargetBB; 1951 } else if (j + 1 == ej) { 1952 // For the last bit test, fall through to Default. 1953 NextMBB = BTB.Default; 1954 } else { 1955 // Otherwise, fall through to the next bit test. 
1956 NextMBB = BTB.Cases[j + 1].ThisBB; 1957 } 1958 1959 SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], 1960 FuncInfo->MBB); 1961 1962 CurDAG->setRoot(SDB->getRoot()); 1963 SDB->clear(); 1964 CodeGenAndEmitDAG(); 1965 1966 if (BTB.ContiguousRange && j + 2 == ej) { 1967 // Since we're not going to use the final bit test, remove it. 1968 BTB.Cases.pop_back(); 1969 break; 1970 } 1971 } 1972 1973 // Update PHI Nodes 1974 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size(); 1975 pi != pe; ++pi) { 1976 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first); 1977 MachineBasicBlock *PHIBB = PHI->getParent(); 1978 assert(PHI->isPHI() && 1979 "This is not a machine PHI node that we are updating!"); 1980 // This is "default" BB. We have two jumps to it. From "header" BB and 1981 // from last "case" BB, unless the latter was skipped. 1982 if (PHIBB == BTB.Default) { 1983 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent); 1984 if (!BTB.ContiguousRange) { 1985 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second) 1986 .addMBB(BTB.Cases.back().ThisBB); 1987 } 1988 } 1989 // One of "cases" BB. 1990 for (unsigned j = 0, ej = BTB.Cases.size(); 1991 j != ej; ++j) { 1992 MachineBasicBlock* cBB = BTB.Cases[j].ThisBB; 1993 if (cBB->isSuccessor(PHIBB)) 1994 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB); 1995 } 1996 } 1997 } 1998 SDB->BitTestCases.clear(); 1999 2000 // If the JumpTable record is filled in, then we need to emit a jump table. 2001 // Updating the PHI nodes is tricky in this case, since we need to determine 2002 // whether the PHI is a successor of the range check MBB or the jump table MBB 2003 for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) { 2004 // Lower header first, if it wasn't already lowered 2005 if (!SDB->JTCases[i].first.Emitted) { 2006 // Set the current basic block to the mbb we wish to insert the code into 2007 FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB; 2008 FuncInfo->InsertPt = FuncInfo->MBB->end(); 2009 // Emit the code 2010 SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first, 2011 FuncInfo->MBB); 2012 CurDAG->setRoot(SDB->getRoot()); 2013 SDB->clear(); 2014 CodeGenAndEmitDAG(); 2015 } 2016 2017 // Set the current basic block to the mbb we wish to insert the code into 2018 FuncInfo->MBB = SDB->JTCases[i].second.MBB; 2019 FuncInfo->InsertPt = FuncInfo->MBB->end(); 2020 // Emit the code 2021 SDB->visitJumpTable(SDB->JTCases[i].second); 2022 CurDAG->setRoot(SDB->getRoot()); 2023 SDB->clear(); 2024 CodeGenAndEmitDAG(); 2025 2026 // Update PHI Nodes 2027 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size(); 2028 pi != pe; ++pi) { 2029 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first); 2030 MachineBasicBlock *PHIBB = PHI->getParent(); 2031 assert(PHI->isPHI() && 2032 "This is not a machine PHI node that we are updating!"); 2033 // "default" BB. We can go there only from header BB. 2034 if (PHIBB == SDB->JTCases[i].second.Default) 2035 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second) 2036 .addMBB(SDB->JTCases[i].first.HeaderBB); 2037 // JT BB. Just iterate over successors here 2038 if (FuncInfo->MBB->isSuccessor(PHIBB)) 2039 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB); 2040 } 2041 } 2042 SDB->JTCases.clear(); 2043 2044 // If we generated any switch lowering information, build and codegen any 2045 // additional DAGs necessary. 
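  // Each SwitchCases entry is one compare-and-branch block produced by switch
  // lowering; codegen each as its own small DAG, then patch up the PHIs in
  // its successors from PHINodesToUpdate.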
2046 for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) { 2047 // Set the current basic block to the mbb we wish to insert the code into 2048 FuncInfo->MBB = SDB->SwitchCases[i].ThisBB; 2049 FuncInfo->InsertPt = FuncInfo->MBB->end(); 2050 2051 // Determine the unique successors. 2052 SmallVector<MachineBasicBlock *, 2> Succs; 2053 Succs.push_back(SDB->SwitchCases[i].TrueBB); 2054 if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB) 2055 Succs.push_back(SDB->SwitchCases[i].FalseBB); 2056 2057 // Emit the code. Note that this could result in FuncInfo->MBB being split. 2058 SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB); 2059 CurDAG->setRoot(SDB->getRoot()); 2060 SDB->clear(); 2061 CodeGenAndEmitDAG(); 2062 2063 // Remember the last block, now that any splitting is done, for use in 2064 // populating PHI nodes in successors. 2065 MachineBasicBlock *ThisBB = FuncInfo->MBB; 2066 2067 // Handle any PHI nodes in successors of this chunk, as if we were coming 2068 // from the original BB before switch expansion. Note that PHI nodes can 2069 // occur multiple times in PHINodesToUpdate. We have to be very careful to 2070 // handle them the right number of times. 2071 for (unsigned i = 0, e = Succs.size(); i != e; ++i) { 2072 FuncInfo->MBB = Succs[i]; 2073 FuncInfo->InsertPt = FuncInfo->MBB->end(); 2074 // FuncInfo->MBB may have been removed from the CFG if a branch was 2075 // constant folded. 2076 if (ThisBB->isSuccessor(FuncInfo->MBB)) { 2077 for (MachineBasicBlock::iterator 2078 MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end(); 2079 MBBI != MBBE && MBBI->isPHI(); ++MBBI) { 2080 MachineInstrBuilder PHI(*MF, MBBI); 2081 // This value for this PHI node is recorded in PHINodesToUpdate. 2082 for (unsigned pn = 0; ; ++pn) { 2083 assert(pn != FuncInfo->PHINodesToUpdate.size() && 2084 "Didn't find PHI entry!"); 2085 if (FuncInfo->PHINodesToUpdate[pn].first == PHI) { 2086 PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB); 2087 break; 2088 } 2089 } 2090 } 2091 } 2092 } 2093 } 2094 SDB->SwitchCases.clear(); 2095 } 2096 2097 /// Create the scheduler. If a specific scheduler was specified 2098 /// via the SchedulerRegistry, use it, otherwise select the 2099 /// one preferred by the target. 2100 /// 2101 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() { 2102 return ISHeuristic(this, OptLevel); 2103 } 2104 2105 //===----------------------------------------------------------------------===// 2106 // Helper functions used by the generated instruction selector. 2107 //===----------------------------------------------------------------------===// 2108 // Calls to these methods are generated by tblgen. 2109 2110 /// CheckAndMask - The isel is trying to match something like (and X, 255). If 2111 /// the dag combiner simplified the 255, we still want to match. RHS is the 2112 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value 2113 /// specified in the .td file (e.g. 255). 2114 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS, 2115 int64_t DesiredMaskS) const { 2116 const APInt &ActualMask = RHS->getAPIntValue(); 2117 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS); 2118 2119 // If the actual mask exactly matches, success! 2120 if (ActualMask == DesiredMask) 2121 return true; 2122 2123 // If the actual AND mask is allowing unallowed bits, this doesn't match. 
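  // Worked example: DesiredMask = 0xFF, ActualMask = 0x0F. The subset check
  // below passes, and the match succeeds iff the remaining bits
  // (NeededMask = 0xF0) are known to be zero in LHS.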
2124   if (!ActualMask.isSubsetOf(DesiredMask))
2125     return false;
2126
2127   // Otherwise, the DAG Combiner may have proven that the value coming in is
2128   // either already zero or is not demanded. Check for known zero input bits.
2129   APInt NeededMask = DesiredMask & ~ActualMask;
2130   if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
2131     return true;
2132
2133   // TODO: check to see if missing bits are just not demanded.
2134
2135   // Otherwise, this pattern doesn't match.
2136   return false;
2137 }
2138
2139 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
2140 /// the dag combiner simplified the 255, we still want to match. RHS is the
2141 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2142 /// specified in the .td file (e.g. 255).
2143 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2144                                    int64_t DesiredMaskS) const {
2145   const APInt &ActualMask = RHS->getAPIntValue();
2146   const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2147
2148   // If the actual mask exactly matches, success!
2149   if (ActualMask == DesiredMask)
2150     return true;
2151
2152   // If the actual OR mask is allowing unallowed bits, this doesn't match.
2153   if (!ActualMask.isSubsetOf(DesiredMask))
2154     return false;
2155
2156   // Otherwise, the DAG Combiner may have proven that the value coming in is
2157   // either already zero or is not demanded. Check for known zero input bits.
2158   APInt NeededMask = DesiredMask & ~ActualMask;
2159
2160   KnownBits Known;
2161   CurDAG->computeKnownBits(LHS, Known);
2162
2163   // If all the missing bits in the or are already known to be set, match!
2164   if (NeededMask.isSubsetOf(Known.One))
2165     return true;
2166
2167   // TODO: check to see if missing bits are just not demanded.
2168
2169   // Otherwise, this pattern doesn't match.
2170   return false;
2171 }
2172
2173 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2174 /// by tblgen. Others should not call it.
2175 void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2176                                                      const SDLoc &DL) {
2177   std::vector<SDValue> InOps;
2178   std::swap(InOps, Ops);
2179
2180   Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
2181   Ops.push_back(InOps[InlineAsm::Op_AsmString]);  // 1
2182   Ops.push_back(InOps[InlineAsm::Op_MDNode]);     // 2, !srcloc
2183   Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]);  // 3 (SideEffect, AlignStack)
2184
2185   unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2186   if (InOps[e-1].getValueType() == MVT::Glue)
2187     --e;  // Don't process a glue operand if it is here.
2188
2189   while (i != e) {
2190     unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
2191     if (!InlineAsm::isMemKind(Flags)) {
2192       // Just skip over this operand, copying the operands verbatim.
2193       Ops.insert(Ops.end(), InOps.begin()+i,
2194                  InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
2195       i += InlineAsm::getNumOperandRegisters(Flags) + 1;
2196     } else {
2197       assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
2198              "Memory operand with multiple values?");
2199
2200       unsigned TiedToOperand;
2201       if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
2202         // We need the constraint ID from the operand this is tied to.
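        // Walk forward from the first operand, stepping over one flag word
        // plus its registers per group, until we reach the flag word of the
        // tied definition.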
2203 unsigned CurOp = InlineAsm::Op_FirstOperand; 2204 Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue(); 2205 for (; TiedToOperand; --TiedToOperand) { 2206 CurOp += InlineAsm::getNumOperandRegisters(Flags)+1; 2207 Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue(); 2208 } 2209 } 2210 2211 // Otherwise, this is a memory operand. Ask the target to select it. 2212 std::vector<SDValue> SelOps; 2213 unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags); 2214 if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps)) 2215 report_fatal_error("Could not match memory address. Inline asm" 2216 " failure!"); 2217 2218 // Add this to the output node. 2219 unsigned NewFlags = 2220 InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size()); 2221 NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID); 2222 Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32)); 2223 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); 2224 i += 2; 2225 } 2226 } 2227 2228 // Add the glue input back if present. 2229 if (e != InOps.size()) 2230 Ops.push_back(InOps.back()); 2231 } 2232 2233 /// findGlueUse - Return use of MVT::Glue value produced by the specified 2234 /// SDNode. 2235 /// 2236 static SDNode *findGlueUse(SDNode *N) { 2237 unsigned FlagResNo = N->getNumValues()-1; 2238 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 2239 SDUse &Use = I.getUse(); 2240 if (Use.getResNo() == FlagResNo) 2241 return Use.getUser(); 2242 } 2243 return nullptr; 2244 } 2245 2246 /// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path 2247 /// beyond "ImmedUse". We may ignore chains as they are checked separately. 2248 static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse, 2249 bool IgnoreChains) { 2250 SmallPtrSet<const SDNode *, 16> Visited; 2251 SmallVector<const SDNode *, 16> WorkList; 2252 // Only check if we have non-immediate uses of Def. 2253 if (ImmedUse->isOnlyUserOf(Def)) 2254 return false; 2255 2256 // We don't care about paths to Def that go through ImmedUse so mark it 2257 // visited and mark non-def operands as used. 2258 Visited.insert(ImmedUse); 2259 for (const SDValue &Op : ImmedUse->op_values()) { 2260 SDNode *N = Op.getNode(); 2261 // Ignore chain deps (they are validated by 2262 // HandleMergeInputChains) and immediate uses 2263 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def) 2264 continue; 2265 if (!Visited.insert(N).second) 2266 continue; 2267 WorkList.push_back(N); 2268 } 2269 2270 // Initialize worklist to operands of Root. 2271 if (Root != ImmedUse) { 2272 for (const SDValue &Op : Root->op_values()) { 2273 SDNode *N = Op.getNode(); 2274 // Ignore chains (they are validated by HandleMergeInputChains) 2275 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def) 2276 continue; 2277 if (!Visited.insert(N).second) 2278 continue; 2279 WorkList.push_back(N); 2280 } 2281 } 2282 2283 return SDNode::hasPredecessorHelper(Def, Visited, WorkList, 0, true); 2284 } 2285 2286 /// IsProfitableToFold - Returns true if it's profitable to fold the specific 2287 /// operand node N of U during instruction selection that starts at Root. 2288 bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U, 2289 SDNode *Root) const { 2290 if (OptLevel == CodeGenOpt::None) return false; 2291 return N.hasOneUse(); 2292 } 2293 2294 /// IsLegalToFold - Returns true if the specific operand node N of 2295 /// U can be folded during instruction selection that starts at Root. 
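/// Unlike IsProfitableToFold above, this is a correctness check: the fold is
/// rejected if it would create a cycle in the DAG or, once glued nodes are
/// taken into account, in the scheduling graph.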
2296 bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2297                                      CodeGenOpt::Level OptLevel,
2298                                      bool IgnoreChains) {
2299   if (OptLevel == CodeGenOpt::None) return false;
2300
2301   // If Root use can somehow reach N through a path that doesn't contain
2302   // U then folding N would create a cycle. e.g. In the following
2303   // diagram, Root can reach N through X. If N is folded into Root, then
2304   // X is both a predecessor and a successor of U.
2305   //
2306   //          [N*]           //
2307   //         ^   ^           //
2308   //        /     \          //
2309   //      [U*]    [X]?       //
2310   //       ^       ^         //
2311   //        \     /          //
2312   //         \   /           //
2313   //        [Root*]          //
2314   //
2315   // * indicates nodes to be folded together.
2316   //
2317   // If Root produces glue, then it gets (even more) interesting. Since it
2318   // will be "glued" together with its glue use in the scheduler, we need to
2319   // check if it might reach N.
2320   //
2321   //          [N*]           //
2322   //         ^   ^           //
2323   //        /     \          //
2324   //      [U*]    [X]?       //
2325   //       ^       ^         //
2326   //        \       \        //
2327   //         \       |       //
2328   //       [Root*]   |       //
2329   //          ^      |       //
2330   //          f      |       //
2331   //          |     /        //
2332   //         [Y]   /         //
2333   //          ^   /          //
2334   //          f  /           //
2335   //          | /            //
2336   //         [GU]            //
2337   //
2338   // If GU (glue use) indirectly reaches N (the load), and Root folds N
2339   // (call it Fold), then X is a predecessor of GU and a successor of
2340   // Fold. But since Fold and GU are glued together, this will create
2341   // a cycle in the scheduling graph.
2342
2343   // If the node has glue, walk down the graph to the "lowest" node in the
2344   // glued set.
2345   EVT VT = Root->getValueType(Root->getNumValues()-1);
2346   while (VT == MVT::Glue) {
2347     SDNode *GU = findGlueUse(Root);
2348     if (!GU)
2349       break;
2350     Root = GU;
2351     VT = Root->getValueType(Root->getNumValues()-1);
2352
2353     // If our query node has a glue result with a use, we've walked up it. If
2354     // the user (which has already been selected) has a chain or indirectly uses
2355     // the chain, HandleMergeInputChains will not consider it. Because of
2356     // this, we cannot ignore chains in this predicate.
2357 IgnoreChains = false; 2358 } 2359 2360 return !findNonImmUse(Root, N.getNode(), U, IgnoreChains); 2361 } 2362 2363 void SelectionDAGISel::Select_INLINEASM(SDNode *N) { 2364 SDLoc DL(N); 2365 2366 std::vector<SDValue> Ops(N->op_begin(), N->op_end()); 2367 SelectInlineAsmMemoryOperands(Ops, DL); 2368 2369 const EVT VTs[] = {MVT::Other, MVT::Glue}; 2370 SDValue New = CurDAG->getNode(ISD::INLINEASM, DL, VTs, Ops); 2371 New->setNodeId(-1); 2372 ReplaceUses(N, New.getNode()); 2373 CurDAG->RemoveDeadNode(N); 2374 } 2375 2376 void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) { 2377 SDLoc dl(Op); 2378 MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1)); 2379 const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0)); 2380 unsigned Reg = 2381 TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0), 2382 *CurDAG); 2383 SDValue New = CurDAG->getCopyFromReg( 2384 Op->getOperand(0), dl, Reg, Op->getValueType(0)); 2385 New->setNodeId(-1); 2386 ReplaceUses(Op, New.getNode()); 2387 CurDAG->RemoveDeadNode(Op); 2388 } 2389 2390 void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) { 2391 SDLoc dl(Op); 2392 MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1)); 2393 const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0)); 2394 unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(), 2395 Op->getOperand(2).getValueType(), 2396 *CurDAG); 2397 SDValue New = CurDAG->getCopyToReg( 2398 Op->getOperand(0), dl, Reg, Op->getOperand(2)); 2399 New->setNodeId(-1); 2400 ReplaceUses(Op, New.getNode()); 2401 CurDAG->RemoveDeadNode(Op); 2402 } 2403 2404 void SelectionDAGISel::Select_UNDEF(SDNode *N) { 2405 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0)); 2406 } 2407 2408 /// GetVBR - decode a vbr encoding whose top bit is set. 2409 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline uint64_t 2410 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) { 2411 assert(Val >= 128 && "Not a VBR"); 2412 Val &= 127; // Remove first vbr bit. 2413 2414 unsigned Shift = 7; 2415 uint64_t NextBits; 2416 do { 2417 NextBits = MatcherTable[Idx++]; 2418 Val |= (NextBits&127) << Shift; 2419 Shift += 7; 2420 } while (NextBits & 128); 2421 2422 return Val; 2423 } 2424 2425 /// When a match is complete, this method updates uses of interior chain results 2426 /// to use the new results. 2427 void SelectionDAGISel::UpdateChains( 2428 SDNode *NodeToMatch, SDValue InputChain, 2429 SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) { 2430 SmallVector<SDNode*, 4> NowDeadNodes; 2431 2432 // Now that all the normal results are replaced, we replace the chain and 2433 // glue results if present. 2434 if (!ChainNodesMatched.empty()) { 2435 assert(InputChain.getNode() && 2436 "Matched input chains but didn't produce a chain"); 2437 // Loop over all of the nodes we matched that produced a chain result. 2438 // Replace all the chain results with the final chain we ended up with. 2439 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) { 2440 SDNode *ChainNode = ChainNodesMatched[i]; 2441 // If ChainNode is null, it's because we replaced it on a previous 2442 // iteration and we cleared it out of the map. Just skip it. 2443 if (!ChainNode) 2444 continue; 2445 2446 assert(ChainNode->getOpcode() != ISD::DELETED_NODE && 2447 "Deleted node left in chain"); 2448 2449 // Don't replace the results of the root node if we're doing a 2450 // MorphNodeTo. 
2451       if (ChainNode == NodeToMatch && isMorphNodeTo)
2452         continue;
2453
2454       SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2455       if (ChainVal.getValueType() == MVT::Glue)
2456         ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
2457       assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2458       SelectionDAG::DAGNodeDeletedListener NDL(
2459           *CurDAG, [&](SDNode *N, SDNode *E) {
2460             std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
2461                          static_cast<SDNode *>(nullptr));
2462           });
2463       if (ChainNode->getOpcode() != ISD::TokenFactor)
2464         ReplaceUses(ChainVal, InputChain);
2465
2466       // If the node became dead and we haven't already seen it, delete it.
2467       if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2468           !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
2469         NowDeadNodes.push_back(ChainNode);
2470     }
2471   }
2472
2473   if (!NowDeadNodes.empty())
2474     CurDAG->RemoveDeadNodes(NowDeadNodes);
2475
2476   DEBUG(dbgs() << "ISEL: Match complete!\n");
2477 }
2478
2479 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2480 /// operation for when the pattern matched at least one node with a chain. The
2481 /// input vector contains a list of all of the chained nodes that we match. We
2482 /// must determine if this is a valid thing to cover (i.e. matching it won't
2483 /// induce cycles in the DAG) and if so, create a TokenFactor node that will
2484 /// be used as the input node chain for the generated nodes.
2485 static SDValue
2486 HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2487                        SelectionDAG *CurDAG) {
2488
2489   SmallPtrSet<const SDNode *, 16> Visited;
2490   SmallVector<const SDNode *, 8> Worklist;
2491   SmallVector<SDValue, 3> InputChains;
2492   unsigned int Max = 8192;
2493
2494   // Quick exit on trivial merge.
2495   if (ChainNodesMatched.size() == 1)
2496     return ChainNodesMatched[0]->getOperand(0);
2497
2498   // Add chains that aren't already added (internal). Peek through
2499   // token factors.
2500   std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2501     if (V.getValueType() != MVT::Other)
2502       return;
2503     if (V->getOpcode() == ISD::EntryToken)
2504       return;
2505     if (!Visited.insert(V.getNode()).second)
2506       return;
2507     if (V->getOpcode() == ISD::TokenFactor) {
2508       for (const SDValue &Op : V->op_values())
2509         AddChains(Op);
2510     } else
2511       InputChains.push_back(V);
2512   };
2513
2514   for (auto *N : ChainNodesMatched) {
2515     Worklist.push_back(N);
2516     Visited.insert(N);
2517   }
2518
2519   while (!Worklist.empty())
2520     AddChains(Worklist.pop_back_val()->getOperand(0));
2521
2522   // Skip the search if there are no chain dependencies.
2523   if (InputChains.size() == 0)
2524     return CurDAG->getEntryNode();
2525
2526   // If one of these chains is a successor of input, we must have a
2527   // node that is both the predecessor and successor of the
2528   // to-be-merged nodes. Fail.
2529   Visited.clear();
2530   for (SDValue V : InputChains)
2531     Worklist.push_back(V.getNode());
2532
2533   for (auto *N : ChainNodesMatched)
2534     if (SDNode::hasPredecessorHelper(N, Visited, Worklist, Max, true))
2535       return SDValue();
2536
2537   // Return merged chain.
2538   if (InputChains.size() == 1)
2539     return InputChains[0];
2540   return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2541                          MVT::Other, InputChains);
2542 }
2543
2544 /// MorphNode - Handle morphing a node in place for the selector.
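/// MorphNodeTo either mutates Node into the target opcode in place or, if an
/// equivalent node already exists, returns that node instead; the glue and
/// chain results are then re-wired to the new result layout.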
2545 SDNode *SelectionDAGISel:: 2546 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList, 2547 ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) { 2548 // It is possible we're using MorphNodeTo to replace a node with no 2549 // normal results with one that has a normal result (or we could be 2550 // adding a chain) and the input could have glue and chains as well. 2551 // In this case we need to shift the operands down. 2552 // FIXME: This is a horrible hack and broken in obscure cases, no worse 2553 // than the old isel though. 2554 int OldGlueResultNo = -1, OldChainResultNo = -1; 2555 2556 unsigned NTMNumResults = Node->getNumValues(); 2557 if (Node->getValueType(NTMNumResults-1) == MVT::Glue) { 2558 OldGlueResultNo = NTMNumResults-1; 2559 if (NTMNumResults != 1 && 2560 Node->getValueType(NTMNumResults-2) == MVT::Other) 2561 OldChainResultNo = NTMNumResults-2; 2562 } else if (Node->getValueType(NTMNumResults-1) == MVT::Other) 2563 OldChainResultNo = NTMNumResults-1; 2564 2565 // Call the underlying SelectionDAG routine to do the transmogrification. Note 2566 // that this deletes operands of the old node that become dead. 2567 SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops); 2568 2569 // MorphNodeTo can operate in two ways: if an existing node with the 2570 // specified operands exists, it can just return it. Otherwise, it 2571 // updates the node in place to have the requested operands. 2572 if (Res == Node) { 2573 // If we updated the node in place, reset the node ID. To the isel, 2574 // this should be just like a newly allocated machine node. 2575 Res->setNodeId(-1); 2576 } 2577 2578 unsigned ResNumResults = Res->getNumValues(); 2579 // Move the glue if needed. 2580 if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 && 2581 (unsigned)OldGlueResultNo != ResNumResults-1) 2582 ReplaceUses(SDValue(Node, OldGlueResultNo), 2583 SDValue(Res, ResNumResults - 1)); 2584 2585 if ((EmitNodeInfo & OPFL_GlueOutput) != 0) 2586 --ResNumResults; 2587 2588 // Move the chain reference if needed. 2589 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 && 2590 (unsigned)OldChainResultNo != ResNumResults-1) 2591 ReplaceUses(SDValue(Node, OldChainResultNo), 2592 SDValue(Res, ResNumResults - 1)); 2593 2594 // Otherwise, no replacement happened because the node already exists. Replace 2595 // Uses of the old node with the new one. 2596 if (Res != Node) { 2597 ReplaceNode(Node, Res); 2598 } else { 2599 EnforceNodeIdInvariant(Res); 2600 } 2601 2602 return Res; 2603 } 2604 2605 /// CheckSame - Implements OP_CheckSame. 2606 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2607 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2608 SDValue N, 2609 const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) { 2610 // Accept if it is exactly the same as a previously recorded node. 2611 unsigned RecNo = MatcherTable[MatcherIndex++]; 2612 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame"); 2613 return N == RecordedNodes[RecNo].first; 2614 } 2615 2616 /// CheckChildSame - Implements OP_CheckChildXSame. 2617 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2618 CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2619 SDValue N, 2620 const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes, 2621 unsigned ChildNo) { 2622 if (ChildNo >= N.getNumOperands()) 2623 return false; // Match fails if out of range child #. 
2624 return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo), 2625 RecordedNodes); 2626 } 2627 2628 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate. 2629 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2630 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2631 const SelectionDAGISel &SDISel) { 2632 return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]); 2633 } 2634 2635 /// CheckNodePredicate - Implements OP_CheckNodePredicate. 2636 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2637 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2638 const SelectionDAGISel &SDISel, SDNode *N) { 2639 return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]); 2640 } 2641 2642 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2643 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2644 SDNode *N) { 2645 uint16_t Opc = MatcherTable[MatcherIndex++]; 2646 Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8; 2647 return N->getOpcode() == Opc; 2648 } 2649 2650 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2651 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, 2652 const TargetLowering *TLI, const DataLayout &DL) { 2653 MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++]; 2654 if (N.getValueType() == VT) return true; 2655 2656 // Handle the case when VT is iPTR. 2657 return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL); 2658 } 2659 2660 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2661 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2662 SDValue N, const TargetLowering *TLI, const DataLayout &DL, 2663 unsigned ChildNo) { 2664 if (ChildNo >= N.getNumOperands()) 2665 return false; // Match fails if out of range child #. 2666 return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI, 2667 DL); 2668 } 2669 2670 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2671 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2672 SDValue N) { 2673 return cast<CondCodeSDNode>(N)->get() == 2674 (ISD::CondCode)MatcherTable[MatcherIndex++]; 2675 } 2676 2677 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2678 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2679 SDValue N, const TargetLowering *TLI, const DataLayout &DL) { 2680 MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++]; 2681 if (cast<VTSDNode>(N)->getVT() == VT) 2682 return true; 2683 2684 // Handle the case when VT is iPTR. 2685 return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL); 2686 } 2687 2688 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2689 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2690 SDValue N) { 2691 int64_t Val = MatcherTable[MatcherIndex++]; 2692 if (Val & 128) 2693 Val = GetVBR(Val, MatcherTable, MatcherIndex); 2694 2695 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N); 2696 return C && C->getSExtValue() == Val; 2697 } 2698 2699 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2700 CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2701 SDValue N, unsigned ChildNo) { 2702 if (ChildNo >= N.getNumOperands()) 2703 return false; // Match fails if out of range child #. 
2704 return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo)); 2705 } 2706 2707 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2708 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2709 SDValue N, const SelectionDAGISel &SDISel) { 2710 int64_t Val = MatcherTable[MatcherIndex++]; 2711 if (Val & 128) 2712 Val = GetVBR(Val, MatcherTable, MatcherIndex); 2713 2714 if (N->getOpcode() != ISD::AND) return false; 2715 2716 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2717 return C && SDISel.CheckAndMask(N.getOperand(0), C, Val); 2718 } 2719 2720 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool 2721 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, 2722 SDValue N, const SelectionDAGISel &SDISel) { 2723 int64_t Val = MatcherTable[MatcherIndex++]; 2724 if (Val & 128) 2725 Val = GetVBR(Val, MatcherTable, MatcherIndex); 2726 2727 if (N->getOpcode() != ISD::OR) return false; 2728 2729 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2730 return C && SDISel.CheckOrMask(N.getOperand(0), C, Val); 2731 } 2732 2733 /// IsPredicateKnownToFail - If we know how and can do so without pushing a 2734 /// scope, evaluate the current node. If the current predicate is known to 2735 /// fail, set Result=true and return anything. If the current predicate is 2736 /// known to pass, set Result=false and return the MatcherIndex to continue 2737 /// with. If the current predicate is unknown, set Result=false and return the 2738 /// MatcherIndex to continue with. 2739 static unsigned IsPredicateKnownToFail(const unsigned char *Table, 2740 unsigned Index, SDValue N, 2741 bool &Result, 2742 const SelectionDAGISel &SDISel, 2743 SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) { 2744 switch (Table[Index++]) { 2745 default: 2746 Result = false; 2747 return Index-1; // Could not evaluate this predicate. 
2748 case SelectionDAGISel::OPC_CheckSame: 2749 Result = !::CheckSame(Table, Index, N, RecordedNodes); 2750 return Index; 2751 case SelectionDAGISel::OPC_CheckChild0Same: 2752 case SelectionDAGISel::OPC_CheckChild1Same: 2753 case SelectionDAGISel::OPC_CheckChild2Same: 2754 case SelectionDAGISel::OPC_CheckChild3Same: 2755 Result = !::CheckChildSame(Table, Index, N, RecordedNodes, 2756 Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same); 2757 return Index; 2758 case SelectionDAGISel::OPC_CheckPatternPredicate: 2759 Result = !::CheckPatternPredicate(Table, Index, SDISel); 2760 return Index; 2761 case SelectionDAGISel::OPC_CheckPredicate: 2762 Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode()); 2763 return Index; 2764 case SelectionDAGISel::OPC_CheckOpcode: 2765 Result = !::CheckOpcode(Table, Index, N.getNode()); 2766 return Index; 2767 case SelectionDAGISel::OPC_CheckType: 2768 Result = !::CheckType(Table, Index, N, SDISel.TLI, 2769 SDISel.CurDAG->getDataLayout()); 2770 return Index; 2771 case SelectionDAGISel::OPC_CheckTypeRes: { 2772 unsigned Res = Table[Index++]; 2773 Result = !::CheckType(Table, Index, N.getValue(Res), SDISel.TLI, 2774 SDISel.CurDAG->getDataLayout()); 2775 return Index; 2776 } 2777 case SelectionDAGISel::OPC_CheckChild0Type: 2778 case SelectionDAGISel::OPC_CheckChild1Type: 2779 case SelectionDAGISel::OPC_CheckChild2Type: 2780 case SelectionDAGISel::OPC_CheckChild3Type: 2781 case SelectionDAGISel::OPC_CheckChild4Type: 2782 case SelectionDAGISel::OPC_CheckChild5Type: 2783 case SelectionDAGISel::OPC_CheckChild6Type: 2784 case SelectionDAGISel::OPC_CheckChild7Type: 2785 Result = !::CheckChildType( 2786 Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(), 2787 Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type); 2788 return Index; 2789 case SelectionDAGISel::OPC_CheckCondCode: 2790 Result = !::CheckCondCode(Table, Index, N); 2791 return Index; 2792 case SelectionDAGISel::OPC_CheckValueType: 2793 Result = !::CheckValueType(Table, Index, N, SDISel.TLI, 2794 SDISel.CurDAG->getDataLayout()); 2795 return Index; 2796 case SelectionDAGISel::OPC_CheckInteger: 2797 Result = !::CheckInteger(Table, Index, N); 2798 return Index; 2799 case SelectionDAGISel::OPC_CheckChild0Integer: 2800 case SelectionDAGISel::OPC_CheckChild1Integer: 2801 case SelectionDAGISel::OPC_CheckChild2Integer: 2802 case SelectionDAGISel::OPC_CheckChild3Integer: 2803 case SelectionDAGISel::OPC_CheckChild4Integer: 2804 Result = !::CheckChildInteger(Table, Index, N, 2805 Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer); 2806 return Index; 2807 case SelectionDAGISel::OPC_CheckAndImm: 2808 Result = !::CheckAndImm(Table, Index, N, SDISel); 2809 return Index; 2810 case SelectionDAGISel::OPC_CheckOrImm: 2811 Result = !::CheckOrImm(Table, Index, N, SDISel); 2812 return Index; 2813 } 2814 } 2815 2816 namespace { 2817 2818 struct MatchScope { 2819 /// FailIndex - If this match fails, this is the index to continue with. 2820 unsigned FailIndex; 2821 2822 /// NodeStack - The node stack when the scope was formed. 2823 SmallVector<SDValue, 4> NodeStack; 2824 2825 /// NumRecordedNodes - The number of recorded nodes when the scope was formed. 2826 unsigned NumRecordedNodes; 2827 2828 /// NumMatchedMemRefs - The number of matched memref entries. 2829 unsigned NumMatchedMemRefs; 2830 2831 /// InputChain/InputGlue - The current chain/glue 2832 SDValue InputChain, InputGlue; 2833 2834 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty. 
2835   bool HasChainNodesMatched;
2836 };
2837
2838 /// A DAG update listener to keep the matching state
2839 /// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed
2840 /// to change the DAG while matching. The X86 addressing-mode matcher is an
2841 /// example of this.
2842 class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
2843 {
2844   SDNode **NodeToMatch;
2845   SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
2846   SmallVectorImpl<MatchScope> &MatchScopes;
2847
2848 public:
2849   MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
2850                     SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
2851                     SmallVectorImpl<MatchScope> &MS)
2852       : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
2853         RecordedNodes(RN), MatchScopes(MS) {}
2854
2855   void NodeDeleted(SDNode *N, SDNode *E) override {
2856     // Some early-returns here to avoid the search if we deleted the node or
2857     // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
2858     // do, so it's unnecessary to update matching state at that point).
2859     // Neither of these can occur currently because we only install this
2860     // update listener while matching complex patterns.
2861     if (!E || E->isMachineOpcode())
2862       return;
2863     // Check if NodeToMatch was updated.
2864     if (N == *NodeToMatch)
2865       *NodeToMatch = E;
2866     // Performing linear search here does not matter because we almost never
2867     // run this code. You'd have to have a CSE during complex pattern
2868     // matching.
2869     for (auto &I : RecordedNodes)
2870       if (I.first.getNode() == N)
2871         I.first.setNode(E);
2872
2873     for (auto &I : MatchScopes)
2874       for (auto &J : I.NodeStack)
2875         if (J.getNode() == N)
2876           J.setNode(E);
2877   }
2878 };
2879
2880 } // end anonymous namespace
2881
2882 void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
2883                                         const unsigned char *MatcherTable,
2884                                         unsigned TableSize) {
2885   // FIXME: Should these even be selected? Handle these cases in the caller?
2886   switch (NodeToMatch->getOpcode()) {
2887   default:
2888     break;
2889   case ISD::EntryToken:       // These nodes remain the same.
2890   case ISD::BasicBlock:
2891   case ISD::Register:
2892   case ISD::RegisterMask:
2893   case ISD::HANDLENODE:
2894   case ISD::MDNODE_SDNODE:
2895   case ISD::TargetConstant:
2896   case ISD::TargetConstantFP:
2897   case ISD::TargetConstantPool:
2898   case ISD::TargetFrameIndex:
2899   case ISD::TargetExternalSymbol:
2900   case ISD::MCSymbol:
2901   case ISD::TargetBlockAddress:
2902   case ISD::TargetJumpTable:
2903   case ISD::TargetGlobalTLSAddress:
2904   case ISD::TargetGlobalAddress:
2905   case ISD::TokenFactor:
2906   case ISD::CopyFromReg:
2907   case ISD::CopyToReg:
2908   case ISD::EH_LABEL:
2909   case ISD::ANNOTATION_LABEL:
2910   case ISD::LIFETIME_START:
2911   case ISD::LIFETIME_END:
2912     NodeToMatch->setNodeId(-1); // Mark selected.
2913     return;
2914   case ISD::AssertSext:
2915   case ISD::AssertZext:
2916     ReplaceUses(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0));
2917     CurDAG->RemoveDeadNode(NodeToMatch);
2918     return;
2919   case ISD::INLINEASM:
2920     Select_INLINEASM(NodeToMatch);
2921     return;
2922   case ISD::READ_REGISTER:
2923     Select_READ_REGISTER(NodeToMatch);
2924     return;
2925   case ISD::WRITE_REGISTER:
2926     Select_WRITE_REGISTER(NodeToMatch);
2927     return;
2928   case ISD::UNDEF:
2929     Select_UNDEF(NodeToMatch);
2930     return;
2931   }
2932
2933   assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
2934
2935   // Set up the node stack with NodeToMatch as the only node on the stack.
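  // The loop below is a small bytecode interpreter: the tblgen-generated
  // MatcherTable is the program and MatcherIndex is its program counter.
  // NodeStack tracks the path from NodeToMatch down to the node currently
  // being inspected (N).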
2936 SmallVector<SDValue, 8> NodeStack; 2937 SDValue N = SDValue(NodeToMatch, 0); 2938 NodeStack.push_back(N); 2939 2940 // MatchScopes - Scopes used when matching, if a match failure happens, this 2941 // indicates where to continue checking. 2942 SmallVector<MatchScope, 8> MatchScopes; 2943 2944 // RecordedNodes - This is the set of nodes that have been recorded by the 2945 // state machine. The second value is the parent of the node, or null if the 2946 // root is recorded. 2947 SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes; 2948 2949 // MatchedMemRefs - This is the set of MemRef's we've seen in the input 2950 // pattern. 2951 SmallVector<MachineMemOperand*, 2> MatchedMemRefs; 2952 2953 // These are the current input chain and glue for use when generating nodes. 2954 // Various Emit operations change these. For example, emitting a copytoreg 2955 // uses and updates these. 2956 SDValue InputChain, InputGlue; 2957 2958 // ChainNodesMatched - If a pattern matches nodes that have input/output 2959 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates 2960 // which ones they are. The result is captured into this list so that we can 2961 // update the chain results when the pattern is complete. 2962 SmallVector<SDNode*, 3> ChainNodesMatched; 2963 2964 DEBUG(dbgs() << "ISEL: Starting pattern match\n"); 2965 2966 // Determine where to start the interpreter. Normally we start at opcode #0, 2967 // but if the state machine starts with an OPC_SwitchOpcode, then we 2968 // accelerate the first lookup (which is guaranteed to be hot) with the 2969 // OpcodeOffset table. 2970 unsigned MatcherIndex = 0; 2971 2972 if (!OpcodeOffset.empty()) { 2973 // Already computed the OpcodeOffset table, just index into it. 2974 if (N.getOpcode() < OpcodeOffset.size()) 2975 MatcherIndex = OpcodeOffset[N.getOpcode()]; 2976 DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n"); 2977 2978 } else if (MatcherTable[0] == OPC_SwitchOpcode) { 2979 // Otherwise, the table isn't computed, but the state machine does start 2980 // with an OPC_SwitchOpcode instruction. Populate the table now, since this 2981 // is the first time we're selecting an instruction. 2982 unsigned Idx = 1; 2983 while (true) { 2984 // Get the size of this case. 2985 unsigned CaseSize = MatcherTable[Idx++]; 2986 if (CaseSize & 128) 2987 CaseSize = GetVBR(CaseSize, MatcherTable, Idx); 2988 if (CaseSize == 0) break; 2989 2990 // Get the opcode, add the index to the table. 2991 uint16_t Opc = MatcherTable[Idx++]; 2992 Opc |= (unsigned short)MatcherTable[Idx++] << 8; 2993 if (Opc >= OpcodeOffset.size()) 2994 OpcodeOffset.resize((Opc+1)*2); 2995 OpcodeOffset[Opc] = Idx; 2996 Idx += CaseSize; 2997 } 2998 2999 // Okay, do the lookup for the first opcode. 3000 if (N.getOpcode() < OpcodeOffset.size()) 3001 MatcherIndex = OpcodeOffset[N.getOpcode()]; 3002 } 3003 3004 while (true) { 3005 assert(MatcherIndex < TableSize && "Invalid index"); 3006 #ifndef NDEBUG 3007 unsigned CurrentOpcodeIndex = MatcherIndex; 3008 #endif 3009 BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++]; 3010 switch (Opcode) { 3011 case OPC_Scope: { 3012 // Okay, the semantics of this operation are that we should push a scope 3013 // then evaluate the first child. However, pushing a scope only to have 3014 // the first check fail (which then pops it) is inefficient. If we can 3015 // determine immediately that the first check (or first several) will 3016 // immediately fail, don't even bother pushing a scope for them. 
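      // Each scope alternative is prefixed with a VBR-encoded NumToSkip; zero
      // marks the end of the scope. For example, the byte 0x85 followed by
      // 0x01 decodes to (0x85 & 127) | (0x01 << 7) = 133.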
      unsigned FailIndex;

      while (true) {
        unsigned NumToSkip = MatcherTable[MatcherIndex++];
        if (NumToSkip & 128)
          NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
        // Found the end of the scope with no match.
        if (NumToSkip == 0) {
          FailIndex = 0;
          break;
        }

        FailIndex = MatcherIndex+NumToSkip;

        unsigned MatcherIndexOfPredicate = MatcherIndex;
        (void)MatcherIndexOfPredicate; // silence warning.

        // If we can't evaluate this predicate without pushing a scope (e.g.
        // if it is a 'MoveParent') or if the predicate succeeds on this node,
        // we push the scope and evaluate the full predicate chain.
        bool Result;
        MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
                                              Result, *this, RecordedNodes);
        if (!Result)
          break;

        DEBUG(dbgs() << "  Skipped scope entry (due to false predicate) at "
                     << "index " << MatcherIndexOfPredicate
                     << ", continuing at " << FailIndex << "\n");
        ++NumDAGIselRetries;

        // Otherwise, we know that this case of the Scope is guaranteed to
        // fail; move on to the next case.
        MatcherIndex = FailIndex;
      }

      // If the whole scope failed to match, bail.
      if (FailIndex == 0) break;

      // Push a MatchScope which indicates where to go if the first child
      // fails to match.
      MatchScope NewEntry;
      NewEntry.FailIndex = FailIndex;
      NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
      NewEntry.NumRecordedNodes = RecordedNodes.size();
      NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
      NewEntry.InputChain = InputChain;
      NewEntry.InputGlue = InputGlue;
      NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
      MatchScopes.push_back(NewEntry);
      continue;
    }
    case OPC_RecordNode: {
      // Remember this node, it may end up being an operand in the pattern.
      SDNode *Parent = nullptr;
      if (NodeStack.size() > 1)
        Parent = NodeStack[NodeStack.size()-2].getNode();
      RecordedNodes.push_back(std::make_pair(N, Parent));
      continue;
    }

    case OPC_RecordChild0: case OPC_RecordChild1:
    case OPC_RecordChild2: case OPC_RecordChild3:
    case OPC_RecordChild4: case OPC_RecordChild5:
    case OPC_RecordChild6: case OPC_RecordChild7: {
      unsigned ChildNo = Opcode-OPC_RecordChild0;
      if (ChildNo >= N.getNumOperands())
        break;  // Match fails if out of range child #.

      RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
                                             N.getNode()));
      continue;
    }
    case OPC_RecordMemRef:
      if (auto *MN = dyn_cast<MemSDNode>(N))
        MatchedMemRefs.push_back(MN->getMemOperand());
      else {
        DEBUG(
          dbgs() << "Expected MemSDNode ";
          N->dump(CurDAG);
          dbgs() << '\n'
        );
      }

      continue;

    case OPC_CaptureGlueInput:
      // If the current node has an input glue, capture it in InputGlue.
      if (N->getNumOperands() != 0 &&
          N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
        InputGlue = N->getOperand(N->getNumOperands()-1);
      continue;
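
    // Illustrative note on the Move*/Record* opcodes below (exposition only;
    // the sample pattern is hypothetical): while matching (add (mul x, y), z),
    // NodeStack tracks the path from the root, evolving roughly as
    //   [add] --MoveChild0--> [add, mul] --MoveParent--> [add]
    // while RecordedNodes accumulates x, y and z in the order the Record*
    // opcodes execute, so later Emit* opcodes can refer to them by index.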
    case OPC_MoveChild: {
      unsigned ChildNo = MatcherTable[MatcherIndex++];
      if (ChildNo >= N.getNumOperands())
        break;  // Match fails if out of range child #.
      N = N.getOperand(ChildNo);
      NodeStack.push_back(N);
      continue;
    }

    case OPC_MoveChild0: case OPC_MoveChild1:
    case OPC_MoveChild2: case OPC_MoveChild3:
    case OPC_MoveChild4: case OPC_MoveChild5:
    case OPC_MoveChild6: case OPC_MoveChild7: {
      unsigned ChildNo = Opcode-OPC_MoveChild0;
      if (ChildNo >= N.getNumOperands())
        break;  // Match fails if out of range child #.
      N = N.getOperand(ChildNo);
      NodeStack.push_back(N);
      continue;
    }

    case OPC_MoveParent:
      // Pop the current node off the NodeStack.
      NodeStack.pop_back();
      assert(!NodeStack.empty() && "Node stack imbalance!");
      N = NodeStack.back();
      continue;

    case OPC_CheckSame:
      if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
      continue;

    case OPC_CheckChild0Same: case OPC_CheckChild1Same:
    case OPC_CheckChild2Same: case OPC_CheckChild3Same:
      if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
                            Opcode-OPC_CheckChild0Same))
        break;
      continue;

    case OPC_CheckPatternPredicate:
      if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
      continue;
    case OPC_CheckPredicate:
      if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
                                N.getNode()))
        break;
      continue;
    case OPC_CheckComplexPat: {
      unsigned CPNum = MatcherTable[MatcherIndex++];
      unsigned RecNo = MatcherTable[MatcherIndex++];
      assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");

      // If the target can modify the DAG during matching, keep the matching
      // state consistent.
      std::unique_ptr<MatchStateUpdater> MSU;
      if (ComplexPatternFuncMutatesDAG())
        MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
                                        MatchScopes));

      if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
                               RecordedNodes[RecNo].first, CPNum,
                               RecordedNodes))
        break;
      continue;
    }
    case OPC_CheckOpcode:
      if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
      continue;

    case OPC_CheckType:
      if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
                       CurDAG->getDataLayout()))
        break;
      continue;

    case OPC_CheckTypeRes: {
      unsigned Res = MatcherTable[MatcherIndex++];
      if (!::CheckType(MatcherTable, MatcherIndex, N.getValue(Res), TLI,
                       CurDAG->getDataLayout()))
        break;
      continue;
    }

    case OPC_SwitchOpcode: {
      unsigned CurNodeOpcode = N.getOpcode();
      unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
      unsigned CaseSize;
      while (true) {
        // Get the size of this case.
        CaseSize = MatcherTable[MatcherIndex++];
        if (CaseSize & 128)
          CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
        if (CaseSize == 0) break;

        uint16_t Opc = MatcherTable[MatcherIndex++];
        Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;

        // If the opcode matches, then we will execute this case.
        if (CurNodeOpcode == Opc)
          break;

        // Otherwise, skip over this case.
        MatcherIndex += CaseSize;
      }

      // If no cases matched, bail out.
      if (CaseSize == 0) break;
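
      // Illustrative sketch of the OPC_SwitchOpcode layout decoded above (an
      // assumption about the generated encoding, for exposition):
      //   OPC_SwitchOpcode,
      //     CaseSize1 (VBR), OpcLo1, OpcHi1, <CaseSize1 bytes of matcher code>,
      //     CaseSize2 (VBR), OpcLo2, OpcHi2, <...>,
      //     0                    // a zero CaseSize terminates the switch
      // The target opcode is stored as two bytes, low byte first, which is
      // why both the OpcodeOffset-building loop earlier and this loop read it
      // as Opc = Lo | (Hi << 8).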
      // Otherwise, execute the case we found.
      DEBUG(dbgs() << "  OpcodeSwitch from " << SwitchStart
                   << " to " << MatcherIndex << "\n");
      continue;
    }

    case OPC_SwitchType: {
      MVT CurNodeVT = N.getSimpleValueType();
      unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
      unsigned CaseSize;
      while (true) {
        // Get the size of this case.
        CaseSize = MatcherTable[MatcherIndex++];
        if (CaseSize & 128)
          CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
        if (CaseSize == 0) break;

        MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
        if (CaseVT == MVT::iPTR)
          CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());

        // If the VT matches, then we will execute this case.
        if (CurNodeVT == CaseVT)
          break;

        // Otherwise, skip over this case.
        MatcherIndex += CaseSize;
      }

      // If no cases matched, bail out.
      if (CaseSize == 0) break;

      // Otherwise, execute the case we found.
      DEBUG(dbgs() << "  TypeSwitch[" << EVT(CurNodeVT).getEVTString()
                   << "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
      continue;
    }
    case OPC_CheckChild0Type: case OPC_CheckChild1Type:
    case OPC_CheckChild2Type: case OPC_CheckChild3Type:
    case OPC_CheckChild4Type: case OPC_CheckChild5Type:
    case OPC_CheckChild6Type: case OPC_CheckChild7Type:
      if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
                            CurDAG->getDataLayout(),
                            Opcode - OPC_CheckChild0Type))
        break;
      continue;
    case OPC_CheckCondCode:
      if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
      continue;
    case OPC_CheckValueType:
      if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
                            CurDAG->getDataLayout()))
        break;
      continue;
    case OPC_CheckInteger:
      if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
      continue;
    case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
    case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
    case OPC_CheckChild4Integer:
      if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
                               Opcode-OPC_CheckChild0Integer)) break;
      continue;
    case OPC_CheckAndImm:
      if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
      continue;
    case OPC_CheckOrImm:
      if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
      continue;

    case OPC_CheckFoldableChainNode: {
      assert(NodeStack.size() != 1 && "No parent node");
      // Verify that all intermediate nodes between the root and this one have
      // a single use.
      bool HasMultipleUses = false;
      for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
        if (!NodeStack[i].getNode()->hasOneUse()) {
          HasMultipleUses = true;
          break;
        }
      if (HasMultipleUses) break;
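
      // Illustrative note on the legality check below (exposition only):
      // folding a chained node F into the root node creates a single merged
      // machine node.  If some other path in the DAG also leads from the root
      // back to F, the merged node would then transitively depend on itself,
      // i.e. the fold would introduce a cycle; IsLegalToFold rejects such
      // folds, and IsProfitableToFold lets the target veto folds that are
      // legal but not worthwhile.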
      // Check to see that the target thinks this is profitable to fold and
      // that we can fold it without inducing cycles in the graph.
      if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
                              NodeToMatch) ||
          !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
                         NodeToMatch, OptLevel,
                         true/*We validate our own chains*/))
        break;

      continue;
    }
    case OPC_EmitInteger: {
      MVT::SimpleValueType VT =
        (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
      int64_t Val = MatcherTable[MatcherIndex++];
      if (Val & 128)
        Val = GetVBR(Val, MatcherTable, MatcherIndex);
      RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
                              CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
                                                        VT), nullptr));
      continue;
    }
    case OPC_EmitRegister: {
      MVT::SimpleValueType VT =
        (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
      unsigned RegNo = MatcherTable[MatcherIndex++];
      RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
                              CurDAG->getRegister(RegNo, VT), nullptr));
      continue;
    }
    case OPC_EmitRegister2: {
      // For targets w/ more than 256 register names, the register enum
      // values are stored in two bytes in the matcher table (just like
      // opcodes).
      MVT::SimpleValueType VT =
        (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
      unsigned RegNo = MatcherTable[MatcherIndex++];
      RegNo |= MatcherTable[MatcherIndex++] << 8;
      RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
                              CurDAG->getRegister(RegNo, VT), nullptr));
      continue;
    }

    case OPC_EmitConvertToTarget: {
      // Convert from IMM/FPIMM to target version.
      unsigned RecNo = MatcherTable[MatcherIndex++];
      assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
      SDValue Imm = RecordedNodes[RecNo].first;

      if (Imm->getOpcode() == ISD::Constant) {
        const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
        Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
                                        Imm.getValueType());
      } else if (Imm->getOpcode() == ISD::ConstantFP) {
        const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
        Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
                                          Imm.getValueType());
      }

      RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
      continue;
    }
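
    // Illustrative note (exposition only): an ISD::Constant is a legal DAG
    // value that could itself be selected into an instruction materializing
    // the constant in a register, whereas ISD::TargetConstant marks a value
    // that must be folded as an immediate operand of the node being emitted.
    // The conversion above is what turns a matched literal, e.g. the 7 in a
    // hypothetical (add x, 7), into the immediate of an add-with-immediate.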
    case OPC_EmitMergeInputChains1_0:    // OPC_EmitMergeInputChains, 1, 0
    case OPC_EmitMergeInputChains1_1:    // OPC_EmitMergeInputChains, 1, 1
    case OPC_EmitMergeInputChains1_2: {  // OPC_EmitMergeInputChains, 1, 2
      // These are space-optimized forms of OPC_EmitMergeInputChains.
      assert(!InputChain.getNode() &&
             "EmitMergeInputChains should be the first chain producing node");
      assert(ChainNodesMatched.empty() &&
             "Should only have one EmitMergeInputChains per match");

      // Read all of the chained nodes.
      unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
      assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
      ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());

      // FIXME: What if other value results of the node have uses not matched
      // by this pattern?
      if (ChainNodesMatched.back() != NodeToMatch &&
          !RecordedNodes[RecNo].first.hasOneUse()) {
        ChainNodesMatched.clear();
        break;
      }

      // Merge the input chains if they are not intra-pattern references.
      InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);

      if (!InputChain.getNode())
        break;  // Failed to merge.
      continue;
    }

    case OPC_EmitMergeInputChains: {
      assert(!InputChain.getNode() &&
             "EmitMergeInputChains should be the first chain producing node");
      // This node gets a list of nodes we matched in the input that have
      // chains.  We want to token factor all of the input chains to these
      // nodes together.  However, if any of the input chains is actually one
      // of the nodes matched in this pattern, then we have an intra-match
      // reference.  Ignore these because the newly token factored chain
      // should not refer to the old nodes.
      unsigned NumChains = MatcherTable[MatcherIndex++];
      assert(NumChains != 0 && "Can't TF zero chains");

      assert(ChainNodesMatched.empty() &&
             "Should only have one EmitMergeInputChains per match");

      // Read all of the chained nodes.
      for (unsigned i = 0; i != NumChains; ++i) {
        unsigned RecNo = MatcherTable[MatcherIndex++];
        assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
        ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());

        // FIXME: What if other value results of the node have uses not
        // matched by this pattern?
        if (ChainNodesMatched.back() != NodeToMatch &&
            !RecordedNodes[RecNo].first.hasOneUse()) {
          ChainNodesMatched.clear();
          break;
        }
      }

      // If the inner loop broke out, the match fails.
      if (ChainNodesMatched.empty())
        break;

      // Merge the input chains if they are not intra-pattern references.
      InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);

      if (!InputChain.getNode())
        break;  // Failed to merge.

      continue;
    }

    case OPC_EmitCopyToReg: {
      unsigned RecNo = MatcherTable[MatcherIndex++];
      assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
      unsigned DestPhysReg = MatcherTable[MatcherIndex++];

      if (!InputChain.getNode())
        InputChain = CurDAG->getEntryNode();

      InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
                                        DestPhysReg, RecordedNodes[RecNo].first,
                                        InputGlue);

      InputGlue = InputChain.getValue(1);
      continue;
    }

    case OPC_EmitNodeXForm: {
      unsigned XFormNo = MatcherTable[MatcherIndex++];
      unsigned RecNo = MatcherTable[MatcherIndex++];
      assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
      SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
      RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
      continue;
    }
    case OPC_Coverage: {
      // This is emitted right before MorphNode/EmitNode, so it should be safe
      // to assume that this node has been selected.
      unsigned index = MatcherTable[MatcherIndex++];
      index |= (MatcherTable[MatcherIndex++] << 8);
      dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
      dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
      continue;
    }
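
    // Illustrative note on the two emission strategies below (exposition
    // only): OPC_EmitNode* creates a brand-new machine node and records its
    // results for use by later opcodes, while OPC_MorphNodeTo* mutates
    // NodeToMatch in place into the target node, implicitly preserving its
    // uses.  That is why a successful MorphNodeTo completes the match
    // immediately, whereas EmitNode falls through to OPC_CompleteMatch.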
    case OPC_EmitNode:     case OPC_MorphNodeTo:
    case OPC_EmitNode0:    case OPC_EmitNode1:    case OPC_EmitNode2:
    case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: {
      uint16_t TargetOpc = MatcherTable[MatcherIndex++];
      TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
      unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
      // Get the result VT list.
      unsigned NumVTs;
      // If this is one of the compressed forms, get the number of VTs based
      // on the Opcode.  Otherwise read the next byte from the table.
      if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
        NumVTs = Opcode - OPC_MorphNodeTo0;
      else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
        NumVTs = Opcode - OPC_EmitNode0;
      else
        NumVTs = MatcherTable[MatcherIndex++];
      SmallVector<EVT, 4> VTs;
      for (unsigned i = 0; i != NumVTs; ++i) {
        MVT::SimpleValueType VT =
          (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
        if (VT == MVT::iPTR)
          VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
        VTs.push_back(VT);
      }

      if (EmitNodeInfo & OPFL_Chain)
        VTs.push_back(MVT::Other);
      if (EmitNodeInfo & OPFL_GlueOutput)
        VTs.push_back(MVT::Glue);

      // This is hot code, so optimize the two most common cases of 1 and 2
      // results.
      SDVTList VTList;
      if (VTs.size() == 1)
        VTList = CurDAG->getVTList(VTs[0]);
      else if (VTs.size() == 2)
        VTList = CurDAG->getVTList(VTs[0], VTs[1]);
      else
        VTList = CurDAG->getVTList(VTs);

      // Get the operand list.
      unsigned NumOps = MatcherTable[MatcherIndex++];
      SmallVector<SDValue, 8> Ops;
      for (unsigned i = 0; i != NumOps; ++i) {
        unsigned RecNo = MatcherTable[MatcherIndex++];
        if (RecNo & 128)
          RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);

        assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
        Ops.push_back(RecordedNodes[RecNo].first);
      }

      // If there are variadic operands to add, handle them now.
      if (EmitNodeInfo & OPFL_VariadicInfo) {
        // Determine the start index to copy from.
        unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
        FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
        assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
               "Invalid variadic node");
        // Copy all of the variadic operands, not including a potential glue
        // input.
        for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
             i != e; ++i) {
          SDValue V = NodeToMatch->getOperand(i);
          if (V.getValueType() == MVT::Glue) break;
          Ops.push_back(V);
        }
      }

      // If this has chain/glue inputs, add them.
      if (EmitNodeInfo & OPFL_Chain)
        Ops.push_back(InputChain);
      if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
        Ops.push_back(InputGlue);

      // Create the node.
      MachineSDNode *Res = nullptr;
      bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
                     (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
      if (!IsMorphNodeTo) {
        // If this is a normal EmitNode command, just create the new node and
        // add the results to the RecordedNodes list.
        Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
                                     VTList, Ops);
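
        // Illustrative note (exposition only): VTList was built above in the
        // order <normal results...>, then MVT::Other if chained, then
        // MVT::Glue if glued.  The loop below therefore records only the
        // leading normal results, and the chain/glue bookkeeping further
        // down can index VTs from the back (VTs.size()-1, VTs.size()-2).
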
        // Add all the non-glue/non-chain results to the RecordedNodes list.
        for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
          if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
          RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
                                                             nullptr));
        }
      } else {
        assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
               "NodeToMatch was removed partway through selection");
        SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
                                                              SDNode *E) {
          CurDAG->salvageDebugInfo(*N);
          auto &Chain = ChainNodesMatched;
          assert((!E || !is_contained(Chain, N)) &&
                 "Chain node replaced during MorphNode");
          Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end());
        });
        Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
                                            Ops, EmitNodeInfo));
      }

      // If the node had chain/glue results, update our notion of the current
      // chain and glue.
      if (EmitNodeInfo & OPFL_GlueOutput) {
        InputGlue = SDValue(Res, VTs.size()-1);
        if (EmitNodeInfo & OPFL_Chain)
          InputChain = SDValue(Res, VTs.size()-2);
      } else if (EmitNodeInfo & OPFL_Chain)
        InputChain = SDValue(Res, VTs.size()-1);

      // If the OPFL_MemRefs glue is set on this node, slap all of the
      // accumulated memrefs onto it.
      //
      // FIXME: This is vastly incorrect for patterns with multiple-output
      // instructions that access memory and for ComplexPatterns that match
      // loads.
      if (EmitNodeInfo & OPFL_MemRefs) {
        // Only attach load or store memory operands if the generated
        // instruction may load or store.
        const MCInstrDesc &MCID = TII->get(TargetOpc);
        bool mayLoad = MCID.mayLoad();
        bool mayStore = MCID.mayStore();

        unsigned NumMemRefs = 0;
        for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
               MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
          if ((*I)->isLoad()) {
            if (mayLoad)
              ++NumMemRefs;
          } else if ((*I)->isStore()) {
            if (mayStore)
              ++NumMemRefs;
          } else {
            ++NumMemRefs;
          }
        }

        MachineSDNode::mmo_iterator MemRefs =
          MF->allocateMemRefsArray(NumMemRefs);

        MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
        for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
               MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
          if ((*I)->isLoad()) {
            if (mayLoad)
              *MemRefsPos++ = *I;
          } else if ((*I)->isStore()) {
            if (mayStore)
              *MemRefsPos++ = *I;
          } else {
            *MemRefsPos++ = *I;
          }
        }

        Res->setMemRefs(MemRefs, MemRefs + NumMemRefs);
      }

      DEBUG(
        if (!MatchedMemRefs.empty() && Res->memoperands_empty())
          dbgs() << "  Dropping mem operands\n";
        dbgs() << "  "
               << (IsMorphNodeTo ? "Morphed" : "Created")
               << " node: ";
        Res->dump(CurDAG);
      );

      // If this was a MorphNodeTo then we're completely done!
      if (IsMorphNodeTo) {
        // Update chain uses.
        UpdateChains(Res, InputChain, ChainNodesMatched, true);
        return;
      }
      continue;
    }
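
    // Illustrative note on the memref filtering above (exposition only; the
    // concrete MMO mix is hypothetical): if MatchedMemRefs holds one load
    // MMO and one store MMO but the emitted instruction's MCInstrDesc
    // reports mayLoad() and not mayStore(), only the load MMO is attached;
    // the DEBUG output then flags the case where every operand was dropped.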
    case OPC_CompleteMatch: {
      // The match has been completed, and any new nodes (if any) have been
      // created.  Patch up references to the matched dag to use the newly
      // created nodes.
      unsigned NumResults = MatcherTable[MatcherIndex++];

      for (unsigned i = 0; i != NumResults; ++i) {
        unsigned ResSlot = MatcherTable[MatcherIndex++];
        if (ResSlot & 128)
          ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);

        assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
        SDValue Res = RecordedNodes[ResSlot].first;

        assert(i < NodeToMatch->getNumValues() &&
               NodeToMatch->getValueType(i) != MVT::Other &&
               NodeToMatch->getValueType(i) != MVT::Glue &&
               "Invalid number of results to complete!");
        assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
                NodeToMatch->getValueType(i) == MVT::iPTR ||
                Res.getValueType() == MVT::iPTR ||
                NodeToMatch->getValueType(i).getSizeInBits() ==
                    Res.getValueSizeInBits()) &&
               "invalid replacement");
        ReplaceUses(SDValue(NodeToMatch, i), Res);
      }

      // Update chain uses.
      UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);

      // If the root node defines glue, we need to update it to the glue
      // result.
      // TODO: This never happens in our tests and I think it can be removed /
      // replaced with an assert, but if we do it this way the change is NFC.
      if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
              MVT::Glue &&
          InputGlue.getNode())
        ReplaceUses(SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
                    InputGlue);

      assert(NodeToMatch->use_empty() &&
             "Didn't replace all uses of the node?");
      CurDAG->RemoveDeadNode(NodeToMatch);

      return;
    }
    }

    // If the code reached this point, then the match failed.  See if there is
    // another child to try in the current 'Scope', otherwise pop it until we
    // find a case to check.
    DEBUG(dbgs() << "  Match failed at index " << CurrentOpcodeIndex << "\n");
    ++NumDAGIselRetries;
    while (true) {
      if (MatchScopes.empty()) {
        CannotYetSelect(NodeToMatch);
        return;
      }

      // Restore the interpreter state back to the point where the scope was
      // formed.
      MatchScope &LastScope = MatchScopes.back();
      RecordedNodes.resize(LastScope.NumRecordedNodes);
      NodeStack.clear();
      NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
      N = NodeStack.back();

      if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
        MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
      MatcherIndex = LastScope.FailIndex;

      DEBUG(dbgs() << "  Continuing at " << MatcherIndex << "\n");

      InputChain = LastScope.InputChain;
      InputGlue = LastScope.InputGlue;
      if (!LastScope.HasChainNodesMatched)
        ChainNodesMatched.clear();

      // Check to see what the offset is at the new MatcherIndex.  If it is
      // zero we have reached the end of this scope, otherwise we have another
      // child in the current scope to try.
      unsigned NumToSkip = MatcherTable[MatcherIndex++];
      if (NumToSkip & 128)
        NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);

      // If we have another child in this scope to match, update FailIndex and
      // try it.
      if (NumToSkip != 0) {
        LastScope.FailIndex = MatcherIndex+NumToSkip;
        break;
      }
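
      // Illustrative walk-through of this recovery loop (exposition only):
      // for a Scope holding alternatives A, B and C, a failure inside A
      // restores the state recorded when the Scope was pushed, resumes at
      // B's NumToSkip entry, points FailIndex past B at C, and re-enters
      // the interpreter at the start of B's matcher code.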
      // End of this scope, pop it and try the next child in the containing
      // scope.
      MatchScopes.pop_back();
    }
  }
}

bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
  assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return false;

  // Detect when "or" is used to add an offset to a stack object.
  if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
    MachineFrameInfo &MFI = MF->getFrameInfo();
    unsigned A = MFI.getObjectAlignment(FN->getIndex());
    assert(isPowerOf2_32(A) && "Unexpected alignment");
    int32_t Off = C->getSExtValue();
    // If the alleged offset fits in the zero bits guaranteed by
    // the alignment, then this "or" is really an "add".
    return (Off >= 0) && (((A - 1) & Off) == unsigned(Off));
  }
  return false;
}

void SelectionDAGISel::CannotYetSelect(SDNode *N) {
  std::string msg;
  raw_string_ostream Msg(msg);
  Msg << "Cannot select: ";

  if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
      N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
      N->getOpcode() != ISD::INTRINSIC_VOID) {
    N->printrFull(Msg, CurDAG);
    Msg << "\nIn function: " << MF->getName();
  } else {
    bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
    unsigned iid =
      cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
    if (iid < Intrinsic::num_intrinsics)
      Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid, None);
    else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
      Msg << "target intrinsic %" << TII->getName(iid);
    else
      Msg << "unknown intrinsic #" << iid;
  }
  report_fatal_error(Msg.str());
}

char SelectionDAGISel::ID = 0;