1 //===--- RDFGraph.cpp -----------------------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Target-independent, SSA-based data flow graph for register data flow (RDF). 11 // 12 #include "RDFGraph.h" 13 #include "llvm/ADT/SetVector.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/CodeGen/MachineBasicBlock.h" 16 #include "llvm/CodeGen/MachineDominanceFrontier.h" 17 #include "llvm/CodeGen/MachineDominators.h" 18 #include "llvm/CodeGen/MachineFunction.h" 19 #include "llvm/CodeGen/MachineInstr.h" 20 #include "llvm/CodeGen/MachineOperand.h" 21 #include "llvm/CodeGen/MachineRegisterInfo.h" 22 #include "llvm/IR/Function.h" 23 #include "llvm/MC/LaneBitmask.h" 24 #include "llvm/MC/MCInstrDesc.h" 25 #include "llvm/MC/MCRegisterInfo.h" 26 #include "llvm/Support/ErrorHandling.h" 27 #include "llvm/Support/raw_ostream.h" 28 #include "llvm/Target/TargetInstrInfo.h" 29 #include "llvm/Target/TargetLowering.h" 30 #include "llvm/Target/TargetRegisterInfo.h" 31 #include <algorithm> 32 #include <cassert> 33 #include <cstdint> 34 #include <cstring> 35 #include <iterator> 36 #include <utility> 37 #include <vector> 38 39 using namespace llvm; 40 using namespace rdf; 41 42 // Printing functions. Have them here first, so that the rest of the code 43 // can use them. 
44 namespace llvm { 45 namespace rdf { 46 47 raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) { 48 if (!P.Mask.all()) 49 OS << ':' << PrintLaneMask(P.Mask); 50 return OS; 51 } 52 53 template<> 54 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) { 55 auto &TRI = P.G.getTRI(); 56 if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs()) 57 OS << TRI.getName(P.Obj.Reg); 58 else 59 OS << '#' << P.Obj.Reg; 60 OS << PrintLaneMaskOpt(P.Obj.Mask); 61 return OS; 62 } 63 64 template<> 65 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) { 66 auto NA = P.G.addr<NodeBase*>(P.Obj); 67 uint16_t Attrs = NA.Addr->getAttrs(); 68 uint16_t Kind = NodeAttrs::kind(Attrs); 69 uint16_t Flags = NodeAttrs::flags(Attrs); 70 switch (NodeAttrs::type(Attrs)) { 71 case NodeAttrs::Code: 72 switch (Kind) { 73 case NodeAttrs::Func: OS << 'f'; break; 74 case NodeAttrs::Block: OS << 'b'; break; 75 case NodeAttrs::Stmt: OS << 's'; break; 76 case NodeAttrs::Phi: OS << 'p'; break; 77 default: OS << "c?"; break; 78 } 79 break; 80 case NodeAttrs::Ref: 81 if (Flags & NodeAttrs::Undef) 82 OS << '/'; 83 if (Flags & NodeAttrs::Dead) 84 OS << '\\'; 85 if (Flags & NodeAttrs::Preserving) 86 OS << '+'; 87 if (Flags & NodeAttrs::Clobbering) 88 OS << '~'; 89 switch (Kind) { 90 case NodeAttrs::Use: OS << 'u'; break; 91 case NodeAttrs::Def: OS << 'd'; break; 92 case NodeAttrs::Block: OS << 'b'; break; 93 default: OS << "r?"; break; 94 } 95 break; 96 default: 97 OS << '?'; 98 break; 99 } 100 OS << P.Obj; 101 if (Flags & NodeAttrs::Shadow) 102 OS << '"'; 103 return OS; 104 } 105 106 static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA, 107 const DataFlowGraph &G) { 108 OS << Print<NodeId>(RA.Id, G) << '<' 109 << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>'; 110 if (RA.Addr->getFlags() & NodeAttrs::Fixed) 111 OS << '!'; 112 } 113 114 template<> 115 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) { 116 
printRefHeader(OS, P.Obj, P.G); 117 OS << '('; 118 if (NodeId N = P.Obj.Addr->getReachingDef()) 119 OS << Print<NodeId>(N, P.G); 120 OS << ','; 121 if (NodeId N = P.Obj.Addr->getReachedDef()) 122 OS << Print<NodeId>(N, P.G); 123 OS << ','; 124 if (NodeId N = P.Obj.Addr->getReachedUse()) 125 OS << Print<NodeId>(N, P.G); 126 OS << "):"; 127 if (NodeId N = P.Obj.Addr->getSibling()) 128 OS << Print<NodeId>(N, P.G); 129 return OS; 130 } 131 132 template<> 133 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) { 134 printRefHeader(OS, P.Obj, P.G); 135 OS << '('; 136 if (NodeId N = P.Obj.Addr->getReachingDef()) 137 OS << Print<NodeId>(N, P.G); 138 OS << "):"; 139 if (NodeId N = P.Obj.Addr->getSibling()) 140 OS << Print<NodeId>(N, P.G); 141 return OS; 142 } 143 144 template<> 145 raw_ostream &operator<< (raw_ostream &OS, 146 const Print<NodeAddr<PhiUseNode*>> &P) { 147 printRefHeader(OS, P.Obj, P.G); 148 OS << '('; 149 if (NodeId N = P.Obj.Addr->getReachingDef()) 150 OS << Print<NodeId>(N, P.G); 151 OS << ','; 152 if (NodeId N = P.Obj.Addr->getPredecessor()) 153 OS << Print<NodeId>(N, P.G); 154 OS << "):"; 155 if (NodeId N = P.Obj.Addr->getSibling()) 156 OS << Print<NodeId>(N, P.G); 157 return OS; 158 } 159 160 template<> 161 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) { 162 switch (P.Obj.Addr->getKind()) { 163 case NodeAttrs::Def: 164 OS << PrintNode<DefNode*>(P.Obj, P.G); 165 break; 166 case NodeAttrs::Use: 167 if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef) 168 OS << PrintNode<PhiUseNode*>(P.Obj, P.G); 169 else 170 OS << PrintNode<UseNode*>(P.Obj, P.G); 171 break; 172 } 173 return OS; 174 } 175 176 template<> 177 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) { 178 unsigned N = P.Obj.size(); 179 for (auto I : P.Obj) { 180 OS << Print<NodeId>(I.Id, P.G); 181 if (--N) 182 OS << ' '; 183 } 184 return OS; 185 } 186 187 template<> 188 raw_ostream &operator<< (raw_ostream &OS, const 
Print<NodeSet> &P) { 189 unsigned N = P.Obj.size(); 190 for (auto I : P.Obj) { 191 OS << Print<NodeId>(I, P.G); 192 if (--N) 193 OS << ' '; 194 } 195 return OS; 196 } 197 198 namespace { 199 200 template <typename T> 201 struct PrintListV { 202 PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {} 203 204 typedef T Type; 205 const NodeList &List; 206 const DataFlowGraph &G; 207 }; 208 209 template <typename T> 210 raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) { 211 unsigned N = P.List.size(); 212 for (NodeAddr<T> A : P.List) { 213 OS << PrintNode<T>(A, P.G); 214 if (--N) 215 OS << ", "; 216 } 217 return OS; 218 } 219 220 } // end anonymous namespace 221 222 template<> 223 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) { 224 OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi [" 225 << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']'; 226 return OS; 227 } 228 229 template<> 230 raw_ostream &operator<< (raw_ostream &OS, 231 const Print<NodeAddr<StmtNode*>> &P) { 232 const MachineInstr &MI = *P.Obj.Addr->getCode(); 233 unsigned Opc = MI.getOpcode(); 234 OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc); 235 // Print the target for calls and branches (for readability). 
236 if (MI.isCall() || MI.isBranch()) { 237 MachineInstr::const_mop_iterator T = 238 llvm::find_if(MI.operands(), 239 [] (const MachineOperand &Op) -> bool { 240 return Op.isMBB() || Op.isGlobal() || Op.isSymbol(); 241 }); 242 if (T != MI.operands_end()) { 243 OS << ' '; 244 if (T->isMBB()) 245 OS << "BB#" << T->getMBB()->getNumber(); 246 else if (T->isGlobal()) 247 OS << T->getGlobal()->getName(); 248 else if (T->isSymbol()) 249 OS << T->getSymbolName(); 250 } 251 } 252 OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']'; 253 return OS; 254 } 255 256 template<> 257 raw_ostream &operator<< (raw_ostream &OS, 258 const Print<NodeAddr<InstrNode*>> &P) { 259 switch (P.Obj.Addr->getKind()) { 260 case NodeAttrs::Phi: 261 OS << PrintNode<PhiNode*>(P.Obj, P.G); 262 break; 263 case NodeAttrs::Stmt: 264 OS << PrintNode<StmtNode*>(P.Obj, P.G); 265 break; 266 default: 267 OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G); 268 break; 269 } 270 return OS; 271 } 272 273 template<> 274 raw_ostream &operator<< (raw_ostream &OS, 275 const Print<NodeAddr<BlockNode*>> &P) { 276 MachineBasicBlock *BB = P.Obj.Addr->getCode(); 277 unsigned NP = BB->pred_size(); 278 std::vector<int> Ns; 279 auto PrintBBs = [&OS,&P] (std::vector<int> Ns) -> void { 280 unsigned N = Ns.size(); 281 for (int I : Ns) { 282 OS << "BB#" << I; 283 if (--N) 284 OS << ", "; 285 } 286 }; 287 288 OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- BB#" << BB->getNumber() 289 << " --- preds(" << NP << "): "; 290 for (MachineBasicBlock *B : BB->predecessors()) 291 Ns.push_back(B->getNumber()); 292 PrintBBs(Ns); 293 294 unsigned NS = BB->succ_size(); 295 OS << " succs(" << NS << "): "; 296 Ns.clear(); 297 for (MachineBasicBlock *B : BB->successors()) 298 Ns.push_back(B->getNumber()); 299 PrintBBs(Ns); 300 OS << '\n'; 301 302 for (auto I : P.Obj.Addr->members(P.G)) 303 OS << PrintNode<InstrNode*>(I, P.G) << '\n'; 304 return OS; 305 } 306 307 template<> 308 raw_ostream &operator<< (raw_ostream &OS, 309 const 
Print<NodeAddr<FuncNode*>> &P) { 310 OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: " 311 << P.Obj.Addr->getCode()->getName() << '\n'; 312 for (auto I : P.Obj.Addr->members(P.G)) 313 OS << PrintNode<BlockNode*>(I, P.G) << '\n'; 314 OS << "]\n"; 315 return OS; 316 } 317 318 template<> 319 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) { 320 OS << '{'; 321 for (auto I : P.Obj) 322 OS << ' ' << Print<RegisterRef>(I, P.G); 323 OS << " }"; 324 return OS; 325 } 326 327 template<> 328 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) { 329 P.Obj.print(OS); 330 return OS; 331 } 332 333 template<> 334 raw_ostream &operator<< (raw_ostream &OS, 335 const Print<DataFlowGraph::DefStack> &P) { 336 for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) { 337 OS << Print<NodeId>(I->Id, P.G) 338 << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>'; 339 I.down(); 340 if (I != E) 341 OS << ' '; 342 } 343 return OS; 344 } 345 346 } // end namespace rdf 347 } // end namespace llvm 348 349 // Node allocation functions. 350 // 351 // Node allocator is like a slab memory allocator: it allocates blocks of 352 // memory in sizes that are multiples of the size of a node. Each block has 353 // the same size. Nodes are allocated from the currently active block, and 354 // when it becomes full, a new one is created. 355 // There is a mapping scheme between node id and its location in a block, 356 // and within that block is described in the header file. 357 // 358 void NodeAllocator::startNewBlock() { 359 void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize); 360 char *P = static_cast<char*>(T); 361 Blocks.push_back(P); 362 // Check if the block index is still within the allowed range, i.e. less 363 // than 2^N, where N is the number of bits in NodeId for the block index. 364 // BitsPerIndex is the number of bits per node index. 
365 assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) && 366 "Out of bits for block index"); 367 ActiveEnd = P; 368 } 369 370 bool NodeAllocator::needNewBlock() { 371 if (Blocks.empty()) 372 return true; 373 374 char *ActiveBegin = Blocks.back(); 375 uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize; 376 return Index >= NodesPerBlock; 377 } 378 379 NodeAddr<NodeBase*> NodeAllocator::New() { 380 if (needNewBlock()) 381 startNewBlock(); 382 383 uint32_t ActiveB = Blocks.size()-1; 384 uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize; 385 NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd), 386 makeId(ActiveB, Index) }; 387 ActiveEnd += NodeMemSize; 388 return NA; 389 } 390 391 NodeId NodeAllocator::id(const NodeBase *P) const { 392 uintptr_t A = reinterpret_cast<uintptr_t>(P); 393 for (unsigned i = 0, n = Blocks.size(); i != n; ++i) { 394 uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]); 395 if (A < B || A >= B + NodesPerBlock*NodeMemSize) 396 continue; 397 uint32_t Idx = (A-B)/NodeMemSize; 398 return makeId(i, Idx); 399 } 400 llvm_unreachable("Invalid node address"); 401 } 402 403 void NodeAllocator::clear() { 404 MemPool.Reset(); 405 Blocks.clear(); 406 ActiveEnd = nullptr; 407 } 408 409 // Insert node NA after "this" in the circular chain. 410 void NodeBase::append(NodeAddr<NodeBase*> NA) { 411 NodeId Nx = Next; 412 // If NA is already "next", do nothing. 413 if (Next != NA.Id) { 414 Next = NA.Id; 415 NA.Addr->Next = Nx; 416 } 417 } 418 419 // Fundamental node manipulator functions. 420 421 // Obtain the register reference from a reference node. 
RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  // Phi references store a packed register reference directly; statement
  // references read the register from the associated machine operand.
  if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
    return G.unpack(Ref.PR);
  assert(Ref.Op != nullptr);
  return G.makeRegRef(Ref.Op->getReg(), Ref.Op->getSubReg());
}

// Set the register reference in the reference node directly (for references
// in phi nodes).
void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
  Ref.PR = G.pack(RR);
}

// Set the register reference in the reference node based on a machine
// operand (for references in statement nodes).
void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
  // G is unused in this overload; kept for symmetry with the phi variant.
  (void)G;
  Ref.Op = Op;
}

// Get the owner of a given reference node.
NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
  // Walk the circular list starting at this node; the first code node
  // encountered is the owning instruction.
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());

  while (NA.Addr != this) {
    if (NA.Addr->getType() == NodeAttrs::Code)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}

// Connect the def node to the reaching def node.
void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  // Prepend this def to DA's reached-def chain; the previous head becomes
  // this node's sibling.
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedDef();
  DA.Addr->setReachedDef(Self);
}

// Connect the use node to the reaching def node.
void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  // Prepend this use to DA's reached-use chain; the previous head becomes
  // this node's sibling.
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedUse();
  DA.Addr->setReachedUse(Self);
}

// Get the first member of the code node.
NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
  if (Code.FirstM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.FirstM);
}

// Get the last member of the code node.
NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
  if (Code.LastM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.LastM);
}

// Add node NA at the end of the member list of the given code node.
void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> ML = getLastMember(G);
  if (ML.Id != 0) {
    ML.Addr->append(NA);
  } else {
    // The list was empty: NA becomes the first member, and its "next" link
    // closes the circular list back to this code node.
    Code.FirstM = NA.Id;
    NodeId Self = G.id(this);
    NA.Addr->setNext(Self);
  }
  Code.LastM = NA.Id;
}

// Add node NA after member node MA in the given code node.
void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
      const DataFlowGraph &G) {
  MA.Addr->append(NA);
  if (Code.LastM == MA.Id)
    Code.LastM = NA.Id;
}

// Remove member node NA from the given code node.
void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> MA = getFirstMember(G);
  assert(MA.Id != 0);

  // Special handling if the member to remove is the first member.
  if (MA.Id == NA.Id) {
    if (Code.LastM == MA.Id) {
      // If it is the only member, set both first and last to 0.
      Code.FirstM = Code.LastM = 0;
    } else {
      // Otherwise, advance the first member.
      Code.FirstM = MA.Addr->getNext();
    }
    return;
  }

  // Otherwise, walk the circular list to find the predecessor of NA and
  // unlink NA from the chain.
  while (MA.Addr != this) {
    NodeId MX = MA.Addr->getNext();
    if (MX == NA.Id) {
      MA.Addr->setNext(NA.Addr->getNext());
      // If the member to remove happens to be the last one, update the
      // LastM indicator.
      if (Code.LastM == NA.Id)
        Code.LastM = MA.Id;
      return;
    }
    MA = G.addr<NodeBase*>(MX);
  }
  llvm_unreachable("No such member");
}

// Return the list of all members of the code node.
NodeList CodeNode::members(const DataFlowGraph &G) const {
  static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
  return members_if(True, G);
}

// Return the owner of the given instr node.
NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
  // Walk the circular list; the owning block is the first code node of
  // kind Block.
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());

  while (NA.Addr != this) {
    assert(NA.Addr->getType() == NodeAttrs::Code);
    if (NA.Addr->getKind() == NodeAttrs::Block)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}

// Add the phi node PA to the given block node. Phis are kept grouped at
// the front of the block's member list, before any statements.
void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> M = getFirstMember(G);
  if (M.Id == 0) {
    addMember(PA, G);
    return;
  }

  assert(M.Addr->getType() == NodeAttrs::Code);
  if (M.Addr->getKind() == NodeAttrs::Stmt) {
    // If the first member of the block is a statement, insert the phi as
    // the first member.
    Code.FirstM = PA.Id;
    PA.Addr->setNext(M.Id);
  } else {
    // If the first member is a phi, find the last phi, and append PA to it.
    assert(M.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<NodeBase*> MN = M;
    do {
      M = MN;
      MN = G.addr<NodeBase*>(M.Addr->getNext());
      assert(MN.Addr->getType() == NodeAttrs::Code);
    } while (MN.Addr->getKind() == NodeAttrs::Phi);

    // M is the last phi.
    addMemberAfter(M, PA, G);
  }
}

// Find the block node corresponding to the machine basic block BB in the
// given func node.
NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
      const DataFlowGraph &G) const {
  auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
    return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
  };
  NodeList Ms = members_if(EqBB, G);
  if (!Ms.empty())
    return Ms[0];
  // Return a null address if BB has no corresponding block node.
  return NodeAddr<BlockNode*>();
}

// Get the block node for the entry block in the given function.
NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
  MachineBasicBlock *EntryB = &getCode()->front();
  return findBlock(EntryB, G);
}

// Target operand information.
//

// For a given instruction, check if there are any bits of RR that can remain
// unchanged across this def. A predicated instruction may leave the register
// untouched (when the predicate is false), so its defs are preserving.
bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
      const {
  return TII.isPredicated(In);
}

// Check if the definition of RR produces an unspecified value. Implicit
// defs on calls are treated as clobbers.
bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
      const {
  if (In.isCall())
    if (In.getOperand(OpNum).isImplicit())
      return true;
  return false;
}

// Check if the given instruction specifically requires the register in
// operand OpNum to be a particular ("fixed") register, i.e. one that cannot
// be substituted: calls/returns/inline-asm operands and registers from the
// instruction's implicit def/use lists.
bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
      const {
  if (In.isCall() || In.isReturn() || In.isInlineAsm())
    return true;
  // Check for a tail call.
  if (In.isBranch())
    for (const MachineOperand &O : In.operands())
      if (O.isGlobal() || O.isSymbol())
        return true;

  const MCInstrDesc &D = In.getDesc();
  if (!D.getImplicitDefs() && !D.getImplicitUses())
    return false;
  const MachineOperand &Op = In.getOperand(OpNum);
  // If there is a sub-register, treat the operand as non-fixed. Currently,
  // fixed registers are those that are listed in the descriptor as implicit
  // uses or defs, and those lists do not allow sub-registers.
  if (Op.getSubReg() != 0)
    return false;
  RegisterId Reg = Op.getReg();
  const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
                                     : D.getImplicitUses();
  if (!ImpR)
    return false;
  // The implicit lists are zero-terminated arrays of physical registers.
  while (*ImpR)
    if (*ImpR++ == Reg)
      return true;
  return false;
}

// Rewrite RR in terms of its maximal super-register, with the lane mask
// adjusted accordingly, so that aliased references compare with a common key.
RegisterRef RegisterAggr::normalize(RegisterRef RR) const {
  // Walk up the super-register chain to the topmost register.
  RegisterId SuperReg = RR.Reg;
  while (true) {
    MCSuperRegIterator SR(SuperReg, &TRI, false);
    if (!SR.isValid())
      break;
    SuperReg = *SR;
  }

  const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(RR.Reg);
  LaneBitmask Common = RR.Mask & RC.LaneMask;
  uint32_t Sub = TRI.getSubRegIndex(SuperReg, RR.Reg);
  LaneBitmask SuperMask = TRI.composeSubRegIndexLaneMask(Sub, Common);
  return RegisterRef(SuperReg, SuperMask);
}

// Check if this aggregate contains any lanes that alias RR.
bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
  RegisterRef NR = normalize(RR);
  auto F = Masks.find(NR.Reg);
  if (F != Masks.end()) {
    if ((F->second & NR.Mask).any())
      return true;
  }
  // Also consult units created by explicit register aliases, which are not
  // captured by the normalized masks.
  if (CheckUnits) {
    for (MCRegUnitIterator U(RR.Reg, &TRI); U.isValid(); ++U)
      if (ExpAliasUnits.test(*U))
        return true;
  }
  return false;
}

// Check if this aggregate fully covers all lanes of RR.
bool RegisterAggr::hasCoverOf(RegisterRef RR) const {
  // Always have a cover for empty lane mask.
  RegisterRef NR = normalize(RR);
  if (NR.Mask.none())
    return true;
  auto F = Masks.find(NR.Reg);
  if (F == Masks.end())
    return false;
  return (NR.Mask & F->second) == NR.Mask;
}

// Add the lanes of RR to this aggregate.
RegisterAggr &RegisterAggr::insert(RegisterRef RR) {
  RegisterRef NR = normalize(RR);
  auto F = Masks.find(NR.Reg);
  if (F == Masks.end())
    Masks.insert({NR.Reg, NR.Mask});
  else
    F->second |= NR.Mask;

  // Visit all register units to see if there are any that were created
  // by explicit aliases. Add those that were to the bit vector.
  for (MCRegUnitIterator U(RR.Reg, &TRI); U.isValid(); ++U) {
    MCRegUnitRootIterator R(*U, &TRI);
    // Units with more than one root come from explicit aliases.
    ++R;
    if (!R.isValid())
      continue;
    ExpAliasUnits.set(*U);
    CheckUnits = true;
  }
  return *this;
}

RegisterAggr &RegisterAggr::insert(const RegisterAggr &RG) {
  for (std::pair<RegisterId,LaneBitmask> P : RG.Masks)
    insert(RegisterRef(P.first, P.second));
  return *this;
}

// Remove the lanes of RR from this aggregate.
RegisterAggr &RegisterAggr::clear(RegisterRef RR) {
  RegisterRef NR = normalize(RR);
  auto F = Masks.find(NR.Reg);
  if (F == Masks.end())
    return *this;
  LaneBitmask NewM = F->second & ~NR.Mask;
  if (NewM.none())
    Masks.erase(F);
  else
    F->second = NewM;
  return *this;
}

RegisterAggr &RegisterAggr::clear(const RegisterAggr &RG) {
  for (std::pair<RegisterId,LaneBitmask> P : RG.Masks)
    clear(RegisterRef(P.first, P.second));
  return *this;
}

// Return the part of RR that is not covered by this aggregate, or a null
// reference if RR is fully covered.
RegisterRef RegisterAggr::clearIn(RegisterRef RR) const {
  RegisterAggr T(TRI);
  T.insert(RR).clear(*this);
  if (T.empty())
    return RegisterRef();
  return RegisterRef(T.begin()->first, T.begin()->second);
}

void RegisterAggr::print(raw_ostream &OS) const {
  OS << '{';
  for (auto I : Masks)
    OS << ' ' << PrintReg(I.first, &TRI) << PrintLaneMaskOpt(I.second);
  OS << " }";
}

//
// The data flow graph construction.
//

DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
    : MF(mf), TII(tii), TRI(tri), MDT(mdt), MDF(mdf), TOI(toi) {
}

// The implementation of the definition stack.
// Each register reference has its own definition stack. In particular,
// for a register references "Reg" and "Reg:subreg" will each have their
// own definition stacks.

// Construct a stack iterator.
DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
      bool Top) : DS(S) {
  if (!Top) {
    // Initialize to bottom.
    Pos = 0;
    return;
  }
  // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
  Pos = DS.Stack.size();
  while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
    Pos--;
}

// Return the number of non-delimiter entries on the stack. Note: the
// comment name is historical — delimiters are skipped by the iterator.
unsigned DataFlowGraph::DefStack::size() const {
  unsigned S = 0;
  for (auto I = top(), E = bottom(); I != E; I.down())
    S++;
  return S;
}

// Remove the top entry from the stack. Remove all intervening delimiters
// so that after this, the stack is either empty, or the top of the stack
// is a non-delimiter.
void DataFlowGraph::DefStack::pop() {
  assert(!empty());
  unsigned P = nextDown(Stack.size());
  Stack.resize(P);
}

// Push a delimiter for block node N on the stack. A delimiter is encoded
// as an entry with a null address and the block's id.
void DataFlowGraph::DefStack::start_block(NodeId N) {
  assert(N != 0);
  Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
}

// Remove all nodes from the top of the stack, until the delimited for
// block node N is encountered. Remove the delimiter as well. In effect,
// this will remove from the stack all definitions from block N.
void DataFlowGraph::DefStack::clear_block(NodeId N) {
  assert(N != 0);
  unsigned P = Stack.size();
  while (P > 0) {
    bool Found = isDelimiter(Stack[P-1], N);
    P--;
    if (Found)
      break;
  }
  // This will also remove the delimiter, if found.
  Stack.resize(P);
}

// Move the stack iterator up by one.
unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
  // Get the next valid position after P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  unsigned SS = Stack.size();
  bool IsDelim;
  assert(P < SS);
  do {
    P++;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P < SS && IsDelim);
  assert(!IsDelim);
  return P;
}

// Move the stack iterator down by one.
unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
  // Get the preceding valid position before P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  assert(P > 0 && P <= Stack.size());
  bool IsDelim = isDelimiter(Stack[P-1]);
  do {
    if (--P == 0)
      break;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P > 0 && IsDelim);
  assert(!IsDelim);
  return P;
}

// Register information.

// Get the list of references aliased to register Reg. Lane masks are
// ignored.
RegisterSet DataFlowGraph::getAliasSet(RegisterId Reg) const {
  // Do not include Reg itself in the alias set.
  RegisterSet AS;
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));

  for (MCRegAliasIterator AI(Reg, &TRI, false); AI.isValid(); ++AI)
    AS.insert(RegisterRef(*AI));
  return AS;
}

// Return the registers that the target defines on entry to a landing pad:
// the exception pointer and exception selector registers, if any.
RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
  RegisterSet LR;
  const Function &F = *MF.getFunction();
  // The personality function (if present) determines the registers used.
  const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
                                            : nullptr;
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  if (RegisterId R = TLI.getExceptionPointerRegister(PF))
    LR.insert(RegisterRef(R));
  if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
    LR.insert(RegisterRef(R));
  return LR;
}

// Node management functions.

// Get the pointer to the node with the id N.
NodeBase *DataFlowGraph::ptr(NodeId N) const {
  if (N == 0)
    return nullptr;
  return Memory.ptr(N);
}

// Get the id of the node at the address P.
894 NodeId DataFlowGraph::id(const NodeBase *P) const { 895 if (P == nullptr) 896 return 0; 897 return Memory.id(P); 898 } 899 900 // Allocate a new node and set the attributes to Attrs. 901 NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) { 902 NodeAddr<NodeBase*> P = Memory.New(); 903 P.Addr->init(); 904 P.Addr->setAttrs(Attrs); 905 return P; 906 } 907 908 // Make a copy of the given node B, except for the data-flow links, which 909 // are set to 0. 910 NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) { 911 NodeAddr<NodeBase*> NA = newNode(0); 912 memcpy(NA.Addr, B.Addr, sizeof(NodeBase)); 913 // Ref nodes need to have the data-flow links reset. 914 if (NA.Addr->getType() == NodeAttrs::Ref) { 915 NodeAddr<RefNode*> RA = NA; 916 RA.Addr->setReachingDef(0); 917 RA.Addr->setSibling(0); 918 if (NA.Addr->getKind() == NodeAttrs::Def) { 919 NodeAddr<DefNode*> DA = NA; 920 DA.Addr->setReachedDef(0); 921 DA.Addr->setReachedUse(0); 922 } 923 } 924 return NA; 925 } 926 927 // Allocation routines for specific node types/kinds. 

// Create a use node for machine operand Op, owned by instruction Owner.
NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
      MachineOperand &Op, uint16_t Flags) {
  NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  UA.Addr->setRegRef(&Op, *this);
  return UA;
}

// Create a phi use node for register RR, coming from predecessor block
// PredB. Flags must include PhiRef.
NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
      RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
  NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  assert(Flags & NodeAttrs::PhiRef);
  PUA.Addr->setRegRef(RR, *this);
  PUA.Addr->setPredecessor(PredB.Id);
  return PUA;
}

// Create a def node for machine operand Op, owned by instruction Owner.
NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
      MachineOperand &Op, uint16_t Flags) {
  NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  DA.Addr->setRegRef(&Op, *this);
  return DA;
}

// Create a def node for register RR (used for phi defs). Flags must
// include PhiRef.
NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
      RegisterRef RR, uint16_t Flags) {
  NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  assert(Flags & NodeAttrs::PhiRef);
  DA.Addr->setRegRef(RR, *this);
  return DA;
}

// Create a phi node in block Owner.
NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
  NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
  Owner.Addr->addPhi(PA, *this);
  return PA;
}

// Create a statement node for machine instruction MI in block Owner.
NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
      MachineInstr *MI) {
  NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
  SA.Addr->setCode(MI);
  Owner.Addr->addMember(SA, *this);
  return SA;
}

// Create a block node for machine basic block BB in function Owner.
NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
      MachineBasicBlock *BB) {
  NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
  BA.Addr->setCode(BB);
  Owner.Addr->addMember(BA, *this);
  return BA;
}

// Create the function node for MF (the root of the graph).
NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
  NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
  FA.Addr->setCode(MF);
  return FA;
}

// Build the data flow graph.
void DataFlowGraph::build(unsigned Options) {
  reset();
  Func = newFunc(&MF);

  if (MF.empty())
    return;

  // Create block and statement nodes for every basic block/instruction.
  for (MachineBasicBlock &B : MF) {
    NodeAddr<BlockNode*> BA = newBlock(Func, &B);
    BlockNodes.insert(std::make_pair(&B, BA));
    for (MachineInstr &I : B) {
      if (I.isDebugValue())
        continue;
      buildStmt(BA, I);
    }
  }

  NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
  NodeList Blocks = Func.Addr->members(*this);

  // Collect information about block references.
  BlockRefsMap RefM;
  buildBlockRefs(EA, RefM);

  // Add function-entry phi nodes for the live-in registers.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (auto I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
    NodeAddr<PhiNode*> PA = newPhi(EA);
    RegisterRef RR = RegisterRef(I->first);
    uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
    NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
    PA.Addr->addMember(DA, *this);
  }

  // Add phis for landing pads.
  // Landing pads, unlike usual basic blocks, are not entered through
  // branches in the program, or fall-throughs from other blocks. They
  // are entered from the exception handling runtime and target's ABI
  // may define certain registers as defined on entry to such a block.
  RegisterSet EHRegs = getLandingPadLiveIns();
  if (!EHRegs.empty()) {
    for (NodeAddr<BlockNode*> BA : Blocks) {
      const MachineBasicBlock &B = *BA.Addr->getCode();
      if (!B.isEHPad())
        continue;

      // Prepare a list of NodeIds of the block's predecessors.
      NodeList Preds;
      for (MachineBasicBlock *PB : B.predecessors())
        Preds.push_back(findBlock(PB));

      // Build phi nodes for each live-in.
      for (RegisterRef RR : EHRegs) {
        NodeAddr<PhiNode*> PA = newPhi(BA);
        uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
        // Add def:
        NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
        PA.Addr->addMember(DA, *this);
        // Add uses (no reaching defs for phi uses):
        for (NodeAddr<BlockNode*> PBA : Preds) {
          NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
          PA.Addr->addMember(PUA, *this);
        }
      }
    }
  }

  // Build a map "PhiM" which will contain, for each block, the set
  // of references that will require phi definitions in that block.
  BlockRefsMap PhiM;
  for (NodeAddr<BlockNode*> BA : Blocks)
    recordDefsForDF(PhiM, RefM, BA);
  for (NodeAddr<BlockNode*> BA : Blocks)
    buildPhis(PhiM, RefM, BA);

  // Link all the refs. This will recursively traverse the dominator tree.
  DefStackMap DM;
  linkBlockRefs(DM, EA);

  // Finally, remove all unused phi nodes.
  if (!(Options & BuildOptions::KeepDeadPhis))
    removeUnusedPhis();
}

// Build a register reference for register Reg with sub-register index Sub.
RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (Sub != 0)
    Reg = TRI.getSubReg(Reg, Sub);
  return RegisterRef(Reg);
}

// Rewrite RR in terms of its maximal super-register with an adjusted
// lane mask.
RegisterRef DataFlowGraph::normalizeRef(RegisterRef RR) const {
  // FIXME copied from RegisterAggr
  RegisterId SuperReg = RR.Reg;
  while (true) {
    MCSuperRegIterator SR(SuperReg, &TRI, false);
    if (!SR.isValid())
      break;
    SuperReg = *SR;
  }

  uint32_t Sub = TRI.getSubRegIndex(SuperReg, RR.Reg);
  const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(RR.Reg);
  LaneBitmask SuperMask = RR.Mask &
                          TRI.composeSubRegIndexLaneMask(Sub, RC.LaneMask);
  return RegisterRef(SuperReg, SuperMask);
}

// Return the part of AR that overlaps BR, or a null reference if they are
// disjoint.
RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const {
  if (AR.Reg == BR.Reg) {
    LaneBitmask M = AR.Mask & BR.Mask;
    return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef();
  }
#ifndef NDEBUG
  RegisterRef NAR = normalizeRef(AR);
  RegisterRef NBR = normalizeRef(BR);
  assert(NAR.Reg != NBR.Reg);
#endif
  // This isn't strictly correct, because the overlap may happen in the
  // part masked out.
  if (TRI.regsOverlap(AR.Reg, BR.Reg))
    return AR;
  return RegisterRef();
}

// For each stack in the map DefM, push the delimiter for block B on it.
void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
  // Push block delimiters.
  for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I)
    I->second.start_block(B);
}

// Remove all definitions coming from block B from each stack in DefM.
void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
  // Pop all defs from this block from the definition stack. Defs that were
  // added to the map during the traversal of instructions will not have a
  // delimiter, but for those, the whole stack will be emptied.
  for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I)
    I->second.clear_block(B);

  // Finally, remove empty stacks from the map.
  for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
    NextI = std::next(I);
    // This preserves the validity of iterators other than I.
    if (I->second.empty())
      DefM.erase(I);
  }
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeList Defs = IA.Addr->members_if(IsDef, *this);
  NodeSet Visited;
#ifndef NDEBUG
  RegisterSet Defined;
#endif

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.

  for (NodeAddr<DefNode*> DA : Defs) {
    // A def related to an already-pushed def shares its stack entry.
    if (Visited.count(DA.Id))
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Assert if the register is defined in two or more unrelated defs.
    // This could happen if there are two or more def operands defining it.
    if (!Defined.insert(RR).second) {
      MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
      dbgs() << "Multiple definitions of register: "
             << Print<RegisterRef>(RR, *this) << " in\n " << *MI
             << "in BB#" << MI->getParent()->getNumber() << '\n';
      llvm_unreachable(nullptr);
    }
#endif
    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkNodeUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    for (RegisterRef A : getAliasSet(RR.Reg /*FIXME? use RegisterRef*/)) {
      // Check that we don't push the same def twice.
      assert(A != RR);
      DefM[A.Reg].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}

// Return the list of all reference nodes related to RA, including RA itself.
// See "getNextRelated" for the meaning of a "related reference".
NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  NodeList Refs;
  NodeId Start = RA.Id;
  // The related refs form a circular chain; walk it until returning to
  // the starting node (or hitting a null link).
  do {
    Refs.push_back(RA);
    RA = getNextRelated(IA, RA);
  } while (RA.Id != 0 && RA.Id != Start);
  return Refs;
}

// Return true if RA and RB overlap, false otherwise.
bool DataFlowGraph::alias(RegisterRef RA, RegisterRef RB) const {
  assert(TargetRegisterInfo::isPhysicalRegister(RA.Reg));
  assert(TargetRegisterInfo::isPhysicalRegister(RB.Reg));

  // Walk both registers' unit lists in lockstep looking for a common
  // unit with compatible lane masks.
  MCRegUnitMaskIterator UMA(RA.Reg, &TRI);
  MCRegUnitMaskIterator UMB(RB.Reg, &TRI);
  // Reg units are returned in the numerical order.
  while (UMA.isValid() && UMB.isValid()) {
    std::pair<uint32_t,LaneBitmask> PA = *UMA;
    std::pair<uint32_t,LaneBitmask> PB = *UMB;
    if (PA.first == PB.first) {
      // Lane mask of 0 (given by the iterator) should be treated as "full".
      // This can happen when the register has only one unit, or when the
      // unit corresponds to explicit aliasing. In such cases, the lane mask
      // from RegisterRef should be ignored.
      if (PA.second.none() || PB.second.none())
        return true;

      // At this point the common unit corresponds to a subregister. The lane
      // masks correspond to the lane mask of that unit within the original
      // register, for example assuming register quadruple q0 = r3:0, and
      // a register pair d1 = r3:2, the lane mask of r2 in q0 may be 0b0100,
      // while the lane mask of r2 in d1 may be 0b0001.
      LaneBitmask LA = PA.second & RA.Mask;
      LaneBitmask LB = PB.second & RB.Mask;
      if (LA.any() && LB.any()) {
        unsigned Root = *MCRegUnitRootIterator(PA.first, &TRI);
        // If register units were guaranteed to only have 1 bit in any lane
        // mask, the code below would not be necessary. This is because LA
        // and LB would have at most 1 bit set each, and that bit would be
        // guaranteed to correspond to the given register unit.
        uint32_t SubA = TRI.getSubRegIndex(RA.Reg, Root);
        uint32_t SubB = TRI.getSubRegIndex(RB.Reg, Root);
        const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(Root);
        LaneBitmask MaskA = TRI.reverseComposeSubRegIndexLaneMask(SubA, LA);
        LaneBitmask MaskB = TRI.reverseComposeSubRegIndexLaneMask(SubB, LB);
        if ((MaskA & MaskB & RC.LaneMask).any())
          return true;
      }

      ++UMA;
      ++UMB;
      continue;
    }
    // Advance the iterator with the smaller unit number.
    if (PA.first < PB.first)
      ++UMA;
    else if (PB.first < PA.first)
      ++UMB;
  }
  return false;
}

// Clear all information in the graph.
void DataFlowGraph::reset() {
  Memory.clear();
  BlockNodes.clear();
  Func = NodeAddr<FuncNode*>();
}

// Return the next reference node in the instruction node IA that is related
// to RA. Conceptually, two reference nodes are related if they refer to the
// same instance of a register access, but differ in flags or other minor
// characteristics. Specific examples of related nodes are shadow reference
// nodes.
// Return the equivalent of nullptr if there are no more related references.
1272 NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA, 1273 NodeAddr<RefNode*> RA) const { 1274 assert(IA.Id != 0 && RA.Id != 0); 1275 1276 auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool { 1277 if (TA.Addr->getKind() != RA.Addr->getKind()) 1278 return false; 1279 if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this)) 1280 return false; 1281 return true; 1282 }; 1283 auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool { 1284 return Related(TA) && 1285 &RA.Addr->getOp() == &TA.Addr->getOp(); 1286 }; 1287 auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool { 1288 if (!Related(TA)) 1289 return false; 1290 if (TA.Addr->getKind() != NodeAttrs::Use) 1291 return true; 1292 // For phi uses, compare predecessor blocks. 1293 const NodeAddr<const PhiUseNode*> TUA = TA; 1294 const NodeAddr<const PhiUseNode*> RUA = RA; 1295 return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor(); 1296 }; 1297 1298 RegisterRef RR = RA.Addr->getRegRef(*this); 1299 if (IA.Addr->getKind() == NodeAttrs::Stmt) 1300 return RA.Addr->getNextRef(RR, RelatedStmt, true, *this); 1301 return RA.Addr->getNextRef(RR, RelatedPhi, true, *this); 1302 } 1303 1304 // Find the next node related to RA in IA that satisfies condition P. 1305 // If such a node was found, return a pair where the second element is the 1306 // located node. If such a node does not exist, return a pair where the 1307 // first element is the element after which such a node should be inserted, 1308 // and the second element is a null-address. 
1309 template <typename Predicate> 1310 std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>> 1311 DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA, 1312 Predicate P) const { 1313 assert(IA.Id != 0 && RA.Id != 0); 1314 1315 NodeAddr<RefNode*> NA; 1316 NodeId Start = RA.Id; 1317 while (true) { 1318 NA = getNextRelated(IA, RA); 1319 if (NA.Id == 0 || NA.Id == Start) 1320 break; 1321 if (P(NA)) 1322 break; 1323 RA = NA; 1324 } 1325 1326 if (NA.Id != 0 && NA.Id != Start) 1327 return std::make_pair(RA, NA); 1328 return std::make_pair(RA, NodeAddr<RefNode*>()); 1329 } 1330 1331 // Get the next shadow node in IA corresponding to RA, and optionally create 1332 // such a node if it does not exist. 1333 NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA, 1334 NodeAddr<RefNode*> RA, bool Create) { 1335 assert(IA.Id != 0 && RA.Id != 0); 1336 1337 uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow; 1338 auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool { 1339 return TA.Addr->getFlags() == Flags; 1340 }; 1341 auto Loc = locateNextRef(IA, RA, IsShadow); 1342 if (Loc.second.Id != 0 || !Create) 1343 return Loc.second; 1344 1345 // Create a copy of RA and mark is as shadow. 1346 NodeAddr<RefNode*> NA = cloneNode(RA); 1347 NA.Addr->setFlags(Flags | NodeAttrs::Shadow); 1348 IA.Addr->addMemberAfter(Loc.first, NA, *this); 1349 return NA; 1350 } 1351 1352 // Get the next shadow node in IA corresponding to RA. Return null-address 1353 // if such a node does not exist. 
1354 NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA, 1355 NodeAddr<RefNode*> RA) const { 1356 assert(IA.Id != 0 && RA.Id != 0); 1357 uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow; 1358 auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool { 1359 return TA.Addr->getFlags() == Flags; 1360 }; 1361 return locateNextRef(IA, RA, IsShadow).second; 1362 } 1363 1364 // Create a new statement node in the block node BA that corresponds to 1365 // the machine instruction MI. 1366 void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) { 1367 NodeAddr<StmtNode*> SA = newStmt(BA, &In); 1368 1369 auto isCall = [] (const MachineInstr &In) -> bool { 1370 if (In.isCall()) 1371 return true; 1372 // Is tail call? 1373 if (In.isBranch()) 1374 for (const MachineOperand &Op : In.operands()) 1375 if (Op.isGlobal() || Op.isSymbol()) 1376 return true; 1377 return false; 1378 }; 1379 1380 auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool { 1381 // This instruction defines DR. Check if there is a use operand that 1382 // would make DR live on entry to the instruction. 1383 for (const MachineOperand &UseOp : In.operands()) { 1384 if (!UseOp.isReg() || !UseOp.isUse() || UseOp.isUndef()) 1385 continue; 1386 RegisterRef UR = makeRegRef(UseOp.getReg(), UseOp.getSubReg()); 1387 if (alias(DR, UR)) 1388 return false; 1389 } 1390 return true; 1391 }; 1392 1393 // Collect a set of registers that this instruction implicitly uses 1394 // or defines. Implicit operands from an instruction will be ignored 1395 // unless they are listed here. 
1396 RegisterSet ImpUses, ImpDefs; 1397 if (const uint16_t *ImpD = In.getDesc().getImplicitDefs()) 1398 while (uint16_t R = *ImpD++) 1399 ImpDefs.insert(RegisterRef(R)); 1400 if (const uint16_t *ImpU = In.getDesc().getImplicitUses()) 1401 while (uint16_t R = *ImpU++) 1402 ImpUses.insert(RegisterRef(R)); 1403 1404 bool IsCall = isCall(In); 1405 bool NeedsImplicit = IsCall || In.isInlineAsm() || In.isReturn(); 1406 bool IsPredicated = TII.isPredicated(In); 1407 unsigned NumOps = In.getNumOperands(); 1408 1409 // Avoid duplicate implicit defs. This will not detect cases of implicit 1410 // defs that define registers that overlap, but it is not clear how to 1411 // interpret that in the absence of explicit defs. Overlapping explicit 1412 // defs are likely illegal already. 1413 RegisterSet DoneDefs; 1414 // Process explicit defs first. 1415 for (unsigned OpN = 0; OpN < NumOps; ++OpN) { 1416 MachineOperand &Op = In.getOperand(OpN); 1417 if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) 1418 continue; 1419 RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg()); 1420 uint16_t Flags = NodeAttrs::None; 1421 if (TOI.isPreserving(In, OpN)) { 1422 Flags |= NodeAttrs::Preserving; 1423 // If the def is preserving, check if it is also undefined. 1424 if (isDefUndef(In, RR)) 1425 Flags |= NodeAttrs::Undef; 1426 } 1427 if (TOI.isClobbering(In, OpN)) 1428 Flags |= NodeAttrs::Clobbering; 1429 if (TOI.isFixedReg(In, OpN)) 1430 Flags |= NodeAttrs::Fixed; 1431 if (IsCall && Op.isDead()) 1432 Flags |= NodeAttrs::Dead; 1433 NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); 1434 SA.Addr->addMember(DA, *this); 1435 DoneDefs.insert(RR); 1436 } 1437 1438 // Process implicit defs, skipping those that have already been added 1439 // as explicit. 
1440 for (unsigned OpN = 0; OpN < NumOps; ++OpN) { 1441 MachineOperand &Op = In.getOperand(OpN); 1442 if (!Op.isReg() || !Op.isDef() || !Op.isImplicit()) 1443 continue; 1444 RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg()); 1445 if (!NeedsImplicit && !ImpDefs.count(RR)) 1446 continue; 1447 if (DoneDefs.count(RR)) 1448 continue; 1449 uint16_t Flags = NodeAttrs::None; 1450 if (TOI.isPreserving(In, OpN)) { 1451 Flags |= NodeAttrs::Preserving; 1452 // If the def is preserving, check if it is also undefined. 1453 if (isDefUndef(In, RR)) 1454 Flags |= NodeAttrs::Undef; 1455 } 1456 if (TOI.isClobbering(In, OpN)) 1457 Flags |= NodeAttrs::Clobbering; 1458 if (TOI.isFixedReg(In, OpN)) 1459 Flags |= NodeAttrs::Fixed; 1460 if (IsCall && Op.isDead()) 1461 Flags |= NodeAttrs::Dead; 1462 NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); 1463 SA.Addr->addMember(DA, *this); 1464 DoneDefs.insert(RR); 1465 } 1466 1467 for (unsigned OpN = 0; OpN < NumOps; ++OpN) { 1468 MachineOperand &Op = In.getOperand(OpN); 1469 if (!Op.isReg() || !Op.isUse()) 1470 continue; 1471 RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg()); 1472 // Add implicit uses on return and call instructions, and on predicated 1473 // instructions regardless of whether or not they appear in the instruction 1474 // descriptor's list. 1475 bool Implicit = Op.isImplicit(); 1476 bool TakeImplicit = NeedsImplicit || IsPredicated; 1477 if (Implicit && !TakeImplicit && !ImpUses.count(RR)) 1478 continue; 1479 uint16_t Flags = NodeAttrs::None; 1480 if (Op.isUndef()) 1481 Flags |= NodeAttrs::Undef; 1482 if (TOI.isFixedReg(In, OpN)) 1483 Flags |= NodeAttrs::Fixed; 1484 NodeAddr<UseNode*> UA = newUse(SA, Op, Flags); 1485 SA.Addr->addMember(UA, *this); 1486 } 1487 } 1488 1489 // Build a map that for each block will have the set of all references from 1490 // that block, and from all blocks dominated by it. 
// Recursively collect, for block BA, the union of all register references
// appearing in BA and in all blocks dominated by BA (post-order over the
// dominator tree), storing the result in RefM[BA.Id].
void DataFlowGraph::buildBlockRefs(NodeAddr<BlockNode*> BA,
      BlockRefsMap &RefM) {
  RegisterSet &Refs = RefM[BA.Id];
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  assert(N);
  for (auto I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    buildBlockRefs(SBA, RefM);
    const RegisterSet &RefsS = RefM[SBA.Id];
    Refs.insert(RefsS.begin(), RefsS.end());
  }

  // Add the references made directly in this block.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
    for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
      Refs.insert(RA.Addr->getRegRef(*this));
}

// Scan all defs in the block node BA and record in PhiM the locations of
// phi nodes corresponding to these defs.
void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, BlockRefsMap &RefM,
      NodeAddr<BlockNode*> BA) {
  // Check all defs from block BA and record them in each block in BA's
  // iterated dominance frontier. This information will later be used to
  // create phi nodes.
  MachineBasicBlock *BB = BA.Addr->getCode();
  assert(BB);
  auto DFLoc = MDF.find(BB);
  if (DFLoc == MDF.end() || DFLoc->second.empty())
    return;

  // Traverse all instructions in the block and collect the set of all
  // defined references. For each reference there will be a phi created
  // in the block's iterated dominance frontier.
  // This is done to make sure that each defined reference gets only one
  // phi node, even if it is defined multiple times.
  RegisterSet Defs;
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
    for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
      Defs.insert(RA.Addr->getRegRef(*this));

  // Calculate the iterated dominance frontier of BB: keep inserting the
  // dominance frontier of every newly added block until a fixed point.
  const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
  SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
  for (unsigned i = 0; i < IDF.size(); ++i) {
    auto F = MDF.find(IDF[i]);
    if (F != MDF.end())
      IDF.insert(F->second.begin(), F->second.end());
  }

  // Get the register references that are reachable from this block.
  RegisterSet &Refs = RefM[BA.Id];
  for (auto DB : IDF) {
    NodeAddr<BlockNode*> DBA = findBlock(DB);
    const RegisterSet &RefsD = RefM[DBA.Id];
    Refs.insert(RefsD.begin(), RefsD.end());
  }

  // Finally, add the set of defs to each block in the iterated dominance
  // frontier.
  for (auto DB : IDF) {
    NodeAddr<BlockNode*> DBA = findBlock(DB);
    PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
  }
}

// Given the locations of phi nodes in the map PhiM, create the phi nodes
// that are located in the block node BA.
void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, BlockRefsMap &RefM,
      NodeAddr<BlockNode*> BA) {
  // Check if this blocks has any DF defs, i.e. if there are any defs
  // that this block is in the iterated dominance frontier of.
  auto HasDF = PhiM.find(BA.Id);
  if (HasDF == PhiM.end() || HasDF->second.empty())
    return;

  // First, remove all R in Refs such that there exists T in Refs
  // such that T covers R. In other words, only leave those refs that
  // are not covered by another ref (i.e. maximal with respect to covering).

  // Return the maximal ref in RRs covering RR, or RR itself if none does.
  auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
    for (RegisterRef I : RRs)
      if (I != RR && RegisterAggr::isCoverOf(I, RR, TRI))
        RR = I;
    return RR;
  };

  RegisterSet MaxDF;
  for (RegisterRef I : HasDF->second)
    MaxDF.insert(MaxCoverIn(I, HasDF->second));

  std::vector<RegisterRef> MaxRefs;
  RegisterSet &RefB = RefM[BA.Id];
  for (RegisterRef I : MaxDF)
    MaxRefs.push_back(MaxCoverIn(I, RefB));

  // Now, for each R in MaxRefs, get the alias closure of R. If the closure
  // only has R in it, create a phi with a def for R. Otherwise, create a
  // phi, and add a def for each S in the closure.

  // Sort the refs so that the phis will be created in a deterministic order.
  std::sort(MaxRefs.begin(), MaxRefs.end());
  // Remove duplicates.
  auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
  MaxRefs.erase(NewEnd, MaxRefs.end());

  // Return true if RR aliases any member of the (index-based) closure.
  auto Aliased = [this,&MaxRefs](RegisterRef RR,
                  std::vector<unsigned> &Closure) -> bool {
    for (unsigned I : Closure)
      if (alias(RR, MaxRefs[I]))
        return true;
    return false;
  };

  // Prepare a list of NodeIds of the block's predecessors.
  NodeList Preds;
  const MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *PB : MBB->predecessors())
    Preds.push_back(findBlock(PB));

  while (!MaxRefs.empty()) {
    // Put the first element in the closure, and then add all subsequent
    // elements from MaxRefs to it, if they alias at least one element
    // already in the closure.
    // ClosureIdx: vector of indices in MaxRefs of members of the closure.
    std::vector<unsigned> ClosureIdx = { 0 };
    for (unsigned i = 1; i != MaxRefs.size(); ++i)
      if (Aliased(MaxRefs[i], ClosureIdx))
        ClosureIdx.push_back(i);

    // Build a phi for the closure.
    unsigned CS = ClosureIdx.size();
    NodeAddr<PhiNode*> PA = newPhi(BA);

    // Add defs.
    for (unsigned X = 0; X != CS; ++X) {
      RegisterRef RR = MaxRefs[ClosureIdx[X]];
      uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
      NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
      PA.Addr->addMember(DA, *this);
    }
    // Add phi uses.
    for (NodeAddr<BlockNode*> PBA : Preds) {
      for (unsigned X = 0; X != CS; ++X) {
        RegisterRef RR = MaxRefs[ClosureIdx[X]];
        NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
        PA.Addr->addMember(PUA, *this);
      }
    }

    // Erase from MaxRefs all elements in the closure. Erase back-to-front
    // so the remaining indices stay valid.
    auto Begin = MaxRefs.begin();
    for (unsigned i = ClosureIdx.size(); i != 0; --i)
      MaxRefs.erase(Begin + ClosureIdx[i-1]);
  }
}

// Remove any unneeded phi nodes that were created during the build process.
void DataFlowGraph::removeUnusedPhis() {
  // This will remove unused phis, i.e. phis where each def does not reach
  // any uses or other defs. This will not detect or remove circular phi
  // chains that are otherwise dead. Unused/dead phis are created during
  // the build process and this function is intended to remove these cases
  // that are easily determinable to be unnecessary.

  SetVector<NodeId> PhiQ;
  for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
    for (auto P : BA.Addr->members_if(IsPhi, *this))
      PhiQ.insert(P.Id);
  }

  // A phi is "used" if any of its defs reaches another def or a use.
  static auto HasUsedDef = [](NodeList &Ms) -> bool {
    for (NodeAddr<NodeBase*> M : Ms) {
      if (M.Addr->getKind() != NodeAttrs::Def)
        continue;
      NodeAddr<DefNode*> DA = M;
      if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
        return true;
    }
    return false;
  };

  // Any phi, if it is removed, may affect other phis (make them dead).
  // For each removed phi, collect the potentially affected phis and add
  // them back to the queue.
  while (!PhiQ.empty()) {
    auto PA = addr<PhiNode*>(PhiQ[0]);
    PhiQ.remove(PA.Id);
    NodeList Refs = PA.Addr->members(*this);
    if (HasUsedDef(Refs))
      continue;
    for (NodeAddr<RefNode*> RA : Refs) {
      if (NodeId RD = RA.Addr->getReachingDef()) {
        auto RDA = addr<DefNode*>(RD);
        NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
        if (IsPhi(OA))
          PhiQ.insert(OA.Id);
      }
      if (RA.Addr->isDef())
        unlinkDef(RA, true);
      else
        unlinkUse(RA, true);
    }
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
    BA.Addr->removeMember(PA, *this);
  }
}

// For a given reference node TA in an instruction node IA, connect the
// reaching def of TA to the appropriate def node. Create any shadow nodes
// as appropriate.
template <typename T>
void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
      DefStack &DS) {
  if (DS.empty())
    return;
  RegisterRef RR = TA.Addr->getRegRef(*this);
  NodeAddr<T> TAP;

  // References from the def stack that have been examined so far.
  RegisterAggr Defs(TRI);

  // Walk the def stack top-down until the examined defs cover RR.
  for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
    RegisterRef QR = I->Addr->getRegRef(*this);

    // Skip all defs that are aliased to any of the defs that we have already
    // seen. If this completes a cover of RR, stop the stack traversal.
    bool Alias = Defs.hasAliasOf(QR);
    bool Cover = Defs.insert(QR).hasCoverOf(RR);
    if (Alias) {
      if (Cover)
        break;
      continue;
    }

    // The reaching def.
    NodeAddr<DefNode*> RDA = *I;

    // Pick the reached node.
    if (TAP.Id == 0) {
      TAP = TA;
    } else {
      // Mark the existing ref as "shadow" and create a new shadow.
      TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
      TAP = getNextShadow(IA, TAP, true);
    }

    // Create the link.
    TAP.Addr->linkToDef(TAP.Id, RDA);

    if (Cover)
      break;
  }
}

// Create data-flow links for all reference nodes in the statement node SA.
void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA) {
#ifndef NDEBUG
  RegisterSet Defs;
#endif

  // Link all nodes (upwards in the data-flow) with their reaching defs.
  for (NodeAddr<RefNode*> RA : SA.Addr->members(*this)) {
    uint16_t Kind = RA.Addr->getKind();
    assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
    RegisterRef RR = RA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Do not expect multiple defs of the same reference.
    assert(Kind != NodeAttrs::Def || !Defs.count(RR));
    Defs.insert(RR);
#endif

    auto F = DefM.find(RR.Reg);
    if (F == DefM.end())
      continue;
    DefStack &DS = F->second;
    if (Kind == NodeAttrs::Use)
      linkRefUp<UseNode*>(SA, RA, DS);
    else if (Kind == NodeAttrs::Def)
      linkRefUp<DefNode*>(SA, RA, DS);
    else
      llvm_unreachable("Unexpected node in instruction");
  }
}

// Create data-flow links for all instructions in the block node BA. This
// will include updating any phi nodes in BA.
void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
  // Push block delimiters.
  markBlock(BA.Id, DefM);

  assert(BA.Addr && "block node address is needed to create a data-flow link");
  // For each non-phi instruction in the block, link all the defs and uses
  // to their reaching defs. For any member of the block (including phis),
  // push the defs on the corresponding stacks.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
    // Ignore phi nodes here. They will be linked part by part from the
    // predecessors.
    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      linkStmtRefs(DefM, IA);

    // Push the definitions on the stack.
    pushDefs(IA, DefM);
  }

  // Recursively process all children in the dominator tree.
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  for (auto I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    linkBlockRefs(DefM, SBA);
  }

  // Link the phi uses from the successor blocks.
  auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
    if (NA.Addr->getKind() != NodeAttrs::Use)
      return false;
    assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
    NodeAddr<PhiUseNode*> PUA = NA;
    return PUA.Addr->getPredecessor() == BA.Id;
  };

  RegisterSet EHLiveIns = getLandingPadLiveIns();
  MachineBasicBlock *MBB = BA.Addr->getCode();

  for (MachineBasicBlock *SB : MBB->successors()) {
    bool IsEHPad = SB->isEHPad();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
      // Do not link phi uses for landing pad live-ins.
      if (IsEHPad) {
        // Find what register this phi is for.
        NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
        assert(RA.Id != 0);
        if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
          continue;
      }
      // Go over each phi use associated with MBB, and link it.
      for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
        NodeAddr<PhiUseNode*> PUA = U;
        RegisterRef RR = PUA.Addr->getRegRef(*this);
        linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
      }
    }
  }

  // Pop all defs from this block from the definition stacks.
  releaseBlock(BA.Id, DefM);
}

// Remove the use node UA from any data-flow and structural links.
void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
  NodeId RD = UA.Addr->getReachingDef();
  NodeId Sib = UA.Addr->getSibling();

  // A use without a reaching def has no data-flow links to undo.
  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  // Remove UA from the reaching def's chain of reached uses.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
  if (TA.Id == UA.Id) {
    // UA is the first reached use: point the def at UA's sibling.
    RDA.Addr->setReachedUse(Sib);
    return;
  }

  // Otherwise find UA's predecessor in the sibling chain and splice UA out.
  while (TA.Id != 0) {
    NodeId S = TA.Addr->getSibling();
    if (S == UA.Id) {
      TA.Addr->setSibling(UA.Addr->getSibling());
      return;
    }
    TA = addr<UseNode*>(S);
  }
}

// Remove the def node DA from any data-flow and structural links.
void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
  //
  //         RD
  //         | reached
  //         | def
  //         :
  //         .
  //        +----+
  // ... -- | DA | -- ... -- 0  : sibling chain of DA
  //        +----+
  //         |  | reached
  //         |  : def
  //         |  .
  //         | ...  : Siblings (defs)
  //         |
  //         : reached
  //         . use
  //        ...  : sibling chain of reached uses

  NodeId RD = DA.Addr->getReachingDef();

  // Visit all siblings of the reached def and reset their reaching defs.
  // Also, defs reached by DA are now "promoted" to being reached by RD,
  // so all of them will need to be spliced into the sibling chain where
  // DA belongs.
  auto getAllNodes = [this] (NodeId N) -> NodeList {
    NodeList Res;
    while (N) {
      auto RA = addr<RefNode*>(N);
      // Keep the nodes in the exact sibling order.
      Res.push_back(RA);
      N = RA.Addr->getSibling();
    }
    return Res;
  };
  NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
  NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());

  // With no reaching def, the reached refs become roots of their own
  // chains: clear their sibling links.
  if (RD == 0) {
    for (NodeAddr<RefNode*> I : ReachedDefs)
      I.Addr->setSibling(0);
    for (NodeAddr<RefNode*> I : ReachedUses)
      I.Addr->setSibling(0);
  }
  // Re-point every reached ref at DA's reaching def (may be 0).
  for (NodeAddr<DefNode*> I : ReachedDefs)
    I.Addr->setReachingDef(RD);
  for (NodeAddr<UseNode*> I : ReachedUses)
    I.Addr->setReachingDef(RD);

  NodeId Sib = DA.Addr->getSibling();
  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  // Update the reaching def node and remove DA from the sibling list.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
  if (TA.Id == DA.Id) {
    // If DA is the first reached def, just update the RD's reached def
    // to the DA's sibling.
    RDA.Addr->setReachedDef(Sib);
  } else {
    // Otherwise, traverse the sibling list of the reached defs and remove
    // DA from it.
    while (TA.Id != 0) {
      NodeId S = TA.Addr->getSibling();
      if (S == DA.Id) {
        TA.Addr->setSibling(Sib);
        break;
      }
      TA = addr<DefNode*>(S);
    }
  }

  // Splice the DA's reached defs into the RDA's reached def chain.
  if (!ReachedDefs.empty()) {
    auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
    Last.Addr->setSibling(RDA.Addr->getReachedDef());
    RDA.Addr->setReachedDef(ReachedDefs.front().Id);
  }
  // Splice the DA's reached uses into the RDA's reached use chain.
  if (!ReachedUses.empty()) {
    auto Last = NodeAddr<UseNode*>(ReachedUses.back());
    Last.Addr->setSibling(RDA.Addr->getReachedUse());
    RDA.Addr->setReachedUse(ReachedUses.front().Id);
  }
}