//===- RDFGraph.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Target-independent, SSA-based data flow graph for register data flow (RDF).
//
#include "llvm/CodeGen/RDFGraph.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RDFRegisters.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;
using namespace rdf;

// Printing functions. Have them here first, so that the rest of the code
// can use them.
namespace llvm {
namespace rdf {

raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
  if (!P.Mask.all())
    OS << ':' << PrintLaneMask(P.Mask);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
  auto &TRI = P.G.getTRI();
  if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
    OS << TRI.getName(P.Obj.Reg);
  else
    OS << '#' << P.Obj.Reg;
  OS << PrintLaneMaskOpt(P.Obj.Mask);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
  auto NA = P.G.addr<NodeBase*>(P.Obj);
  uint16_t Attrs = NA.Addr->getAttrs();
  uint16_t Kind = NodeAttrs::kind(Attrs);
  uint16_t Flags = NodeAttrs::flags(Attrs);
  switch (NodeAttrs::type(Attrs)) {
    case NodeAttrs::Code:
      switch (Kind) {
        case NodeAttrs::Func:   OS << 'f'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        case NodeAttrs::Stmt:   OS << 's'; break;
        case NodeAttrs::Phi:    OS << 'p'; break;
        default:                OS << "c?"; break;
      }
      break;
    case NodeAttrs::Ref:
      if (Flags & NodeAttrs::Undef)
        OS << '/';
      if (Flags & NodeAttrs::Dead)
        OS << '\\';
      if (Flags & NodeAttrs::Preserving)
        OS << '+';
      if (Flags & NodeAttrs::Clobbering)
        OS << '~';
      switch (Kind) {
        case NodeAttrs::Use:    OS << 'u'; break;
        case NodeAttrs::Def:    OS << 'd'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        default:                OS << "r?"; break;
      }
      break;
    default:
      OS << '?';
      break;
  }
  OS << P.Obj;
  if (Flags & NodeAttrs::Shadow)
    OS << '"';
  return OS;
}

static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
      const DataFlowGraph &G) {
  OS << Print<NodeId>(RA.Id, G) << '<'
     << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
  if (RA.Addr->getFlags() & NodeAttrs::Fixed)
    OS << '!';
}

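// Note on the textual form produced by the def/use printers below: with
// made-up node ids, a def node prints roughly as
//   d42<R0>(40,43,44):45
// i.e. the node id, the register, optionally '!' for fixed refs, then the
// reaching def, reached def and reached use in parentheses, and the sibling
// after the colon. Use nodes print the same header with only the reaching
// def in parentheses.
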
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedUse())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<PhiUseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getPredecessor())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Def:
      OS << PrintNode<DefNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Use:
      if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
        OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
      else
        OS << PrintNode<UseNode*>(P.Obj, P.G);
      break;
  }
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print<NodeId>(I.Id, P.G);
    if (--N)
      OS << ' ';
  }
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print<NodeId>(I, P.G);
    if (--N)
      OS << ' ';
  }
  return OS;
}

namespace {

  template <typename T>
  struct PrintListV {
    PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}

    using Type = T;
    const NodeList &List;
    const DataFlowGraph &G;
  };

  template <typename T>
  raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
    unsigned N = P.List.size();
    for (NodeAddr<T> A : P.List) {
      OS << PrintNode<T>(A, P.G);
      if (--N)
        OS << ", ";
    }
    return OS;
  }

} // end anonymous namespace

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
  OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi ["
     << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}

raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<StmtNode *>> &P) {
  const MachineInstr &MI = *P.Obj.Addr->getCode();
  unsigned Opc = MI.getOpcode();
  OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
  // Print the target for calls and branches (for readability).
  if (MI.isCall() || MI.isBranch()) {
    MachineInstr::const_mop_iterator T =
          llvm::find_if(MI.operands(),
                        [] (const MachineOperand &Op) -> bool {
                          return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
                        });
    if (T != MI.operands_end()) {
      OS << ' ';
      if (T->isMBB())
        OS << printMBBReference(*T->getMBB());
      else if (T->isGlobal())
        OS << T->getGlobal()->getName();
      else if (T->isSymbol())
        OS << T->getSymbolName();
    }
  }
  OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<InstrNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Phi:
      OS << PrintNode<PhiNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Stmt:
      OS << PrintNode<StmtNode*>(P.Obj, P.G);
      break;
    default:
      OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G);
      break;
  }
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<BlockNode*>> &P) {
  MachineBasicBlock *BB = P.Obj.Addr->getCode();
  unsigned NP = BB->pred_size();
  std::vector<int> Ns;
  auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
    unsigned N = Ns.size();
    for (int I : Ns) {
      OS << "%bb." << I;
      if (--N)
        OS << ", ";
    }
  };

  OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
     << " --- preds(" << NP << "): ";
  for (MachineBasicBlock *B : BB->predecessors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);

  unsigned NS = BB->succ_size();
  OS << "  succs(" << NS << "): ";
  Ns.clear();
  for (MachineBasicBlock *B : BB->successors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);
  OS << '\n';

  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<InstrNode*>(I, P.G) << '\n';
  return OS;
}

raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<FuncNode *>> &P) {
  OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: "
     << P.Obj.Addr->getCode()->getName() << '\n';
  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<BlockNode*>(I, P.G) << '\n';
  OS << "]\n";
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
  OS << '{';
  for (auto I : P.Obj)
    OS << ' ' << Print<RegisterRef>(I, P.G);
  OS << " }";
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
  P.Obj.print(OS);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<DataFlowGraph::DefStack> &P) {
  for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
    OS << Print<NodeId>(I->Id, P.G)
       << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
    I.down();
    if (I != E)
      OS << ' ';
  }
  return OS;
}

} // end namespace rdf
} // end namespace llvm

// Node allocation functions.
//
// Node allocator is like a slab memory allocator: it allocates blocks of
// memory in sizes that are multiples of the size of a node. Each block has
// the same size. Nodes are allocated from the currently active block, and
// when it becomes full, a new one is created.
// The mapping scheme between a node id and the node's location within a
// block is described in the header file.
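// Roughly, a node id encodes a (block index, index within the block) pair:
// the low BitsPerIndex bits select the node within its block and the
// remaining bits select the block, with node id 0 reserved as the "null" id.
// The exact encoding is described in the header; this sketch only explains
// the assert in startNewBlock and the arithmetic in id() below.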
//
void NodeAllocator::startNewBlock() {
  void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize);
  char *P = static_cast<char*>(T);
  Blocks.push_back(P);
  // Check if the block index is still within the allowed range, i.e. less
  // than 2^N, where N is the number of bits in NodeId for the block index.
  // BitsPerIndex is the number of bits per node index.
  assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
         "Out of bits for block index");
  ActiveEnd = P;
}

bool NodeAllocator::needNewBlock() {
  if (Blocks.empty())
    return true;

  char *ActiveBegin = Blocks.back();
  uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize;
  return Index >= NodesPerBlock;
}

NodeAddr<NodeBase*> NodeAllocator::New() {
  if (needNewBlock())
    startNewBlock();

  uint32_t ActiveB = Blocks.size()-1;
  uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize;
  NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd),
                             makeId(ActiveB, Index) };
  ActiveEnd += NodeMemSize;
  return NA;
}

NodeId NodeAllocator::id(const NodeBase *P) const {
  uintptr_t A = reinterpret_cast<uintptr_t>(P);
  for (unsigned i = 0, n = Blocks.size(); i != n; ++i) {
    uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]);
    if (A < B || A >= B + NodesPerBlock*NodeMemSize)
      continue;
    uint32_t Idx = (A-B)/NodeMemSize;
    return makeId(i, Idx);
  }
  llvm_unreachable("Invalid node address");
}

void NodeAllocator::clear() {
  MemPool.Reset();
  Blocks.clear();
  ActiveEnd = nullptr;
}

// Insert node NA after "this" in the circular chain.
void NodeBase::append(NodeAddr<NodeBase*> NA) {
  NodeId Nx = Next;
  // If NA is already "next", do nothing.
  if (Next != NA.Id) {
    Next = NA.Id;
    NA.Addr->Next = Nx;
  }
}

// Fundamental node manipulator functions.

// Obtain the register reference from a reference node.
RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
    return G.unpack(Ref.PR);
  assert(Ref.Op != nullptr);
  return G.makeRegRef(*Ref.Op);
}

// Set the register reference in the reference node directly (for references
// in phi nodes).
void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
  Ref.PR = G.pack(RR);
}

// Set the register reference in the reference node based on a machine
// operand (for references in statement nodes).
void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
  (void)G;
  Ref.Op = Op;
}

// Get the owner of a given reference node.
NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());

  while (NA.Addr != this) {
    if (NA.Addr->getType() == NodeAttrs::Code)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}

// Connect the def node to the reaching def node.
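// The new def becomes the head of DA's chain of reached defs: DA's previous
// reached def turns into this node's sibling. (Sketch with made-up ids: if
// DA reached d10 -> d11, linking d9 gives the chain d9 -> d10 -> d11.)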
void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedDef();
  DA.Addr->setReachedDef(Self);
}

// Connect the use node to the reaching def node.
void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedUse();
  DA.Addr->setReachedUse(Self);
}

// Get the first member of the code node.
NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
  if (Code.FirstM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.FirstM);
}

// Get the last member of the code node.
NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
  if (Code.LastM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.LastM);
}

// Add node NA at the end of the member list of the given code node.
void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> ML = getLastMember(G);
  if (ML.Id != 0) {
    ML.Addr->append(NA);
  } else {
    Code.FirstM = NA.Id;
    NodeId Self = G.id(this);
    NA.Addr->setNext(Self);
  }
  Code.LastM = NA.Id;
}

// Add node NA after member node MA in the given code node.
void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
      const DataFlowGraph &G) {
  MA.Addr->append(NA);
  if (Code.LastM == MA.Id)
    Code.LastM = NA.Id;
}

// Remove member node NA from the given code node.
void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> MA = getFirstMember(G);
  assert(MA.Id != 0);

  // Special handling if the member to remove is the first member.
  if (MA.Id == NA.Id) {
    if (Code.LastM == MA.Id) {
      // If it is the only member, set both first and last to 0.
      Code.FirstM = Code.LastM = 0;
    } else {
      // Otherwise, advance the first member.
      Code.FirstM = MA.Addr->getNext();
    }
    return;
  }

  while (MA.Addr != this) {
    NodeId MX = MA.Addr->getNext();
    if (MX == NA.Id) {
      MA.Addr->setNext(NA.Addr->getNext());
      // If the member to remove happens to be the last one, update the
      // LastM indicator.
      if (Code.LastM == NA.Id)
        Code.LastM = MA.Id;
      return;
    }
    MA = G.addr<NodeBase*>(MX);
  }
  llvm_unreachable("No such member");
}

// Return the list of all members of the code node.
NodeList CodeNode::members(const DataFlowGraph &G) const {
  static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
  return members_if(True, G);
}

// Return the owner of the given instr node.
NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());

  while (NA.Addr != this) {
    assert(NA.Addr->getType() == NodeAttrs::Code);
    if (NA.Addr->getKind() == NodeAttrs::Block)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}

// Add the phi node PA to the given block node.
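// Phi nodes are kept at the front of a block's member list, before any
// statement nodes, so PA is inserted after the last existing phi (or made
// the first member if the block currently has no phis).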
void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> M = getFirstMember(G);
  if (M.Id == 0) {
    addMember(PA, G);
    return;
  }

  assert(M.Addr->getType() == NodeAttrs::Code);
  if (M.Addr->getKind() == NodeAttrs::Stmt) {
    // If the first member of the block is a statement, insert the phi as
    // the first member.
    Code.FirstM = PA.Id;
    PA.Addr->setNext(M.Id);
  } else {
    // If the first member is a phi, find the last phi, and append PA to it.
    assert(M.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<NodeBase*> MN = M;
    do {
      M = MN;
      MN = G.addr<NodeBase*>(M.Addr->getNext());
      assert(MN.Addr->getType() == NodeAttrs::Code);
    } while (MN.Addr->getKind() == NodeAttrs::Phi);

    // M is the last phi.
    addMemberAfter(M, PA, G);
  }
}

// Find the block node corresponding to the machine basic block BB in the
// given func node.
NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
      const DataFlowGraph &G) const {
  auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
    return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
  };
  NodeList Ms = members_if(EqBB, G);
  if (!Ms.empty())
    return Ms[0];
  return NodeAddr<BlockNode*>();
}

// Get the block node for the entry block in the given function.
NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
  MachineBasicBlock *EntryB = &getCode()->front();
  return findBlock(EntryB, G);
}

// Target operand information.
//

// For a given instruction, check if there are any bits of RR that can remain
// unchanged across this def.
bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
      const {
  return TII.isPredicated(In);
}

// Check if the definition of RR produces an unspecified value.
bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
      const {
  const MachineOperand &Op = In.getOperand(OpNum);
  if (Op.isRegMask())
    return true;
  assert(Op.isReg());
  if (In.isCall())
    if (Op.isDef() && Op.isDead())
      return true;
  return false;
}

// Check if the given instruction specifically requires the register used in
// operand OpNum to be treated as fixed, i.e. tied to this particular
// physical register.
bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
      const {
  if (In.isCall() || In.isReturn() || In.isInlineAsm())
    return true;
  // Check for a tail call.
  if (In.isBranch())
    for (const MachineOperand &O : In.operands())
      if (O.isGlobal() || O.isSymbol())
        return true;

  const MCInstrDesc &D = In.getDesc();
  if (!D.getImplicitDefs() && !D.getImplicitUses())
    return false;
  const MachineOperand &Op = In.getOperand(OpNum);
  // If there is a sub-register, treat the operand as non-fixed. Currently,
  // fixed registers are those that are listed in the descriptor as implicit
  // uses or defs, and those lists do not allow sub-registers.
  if (Op.getSubReg() != 0)
    return false;
  Register Reg = Op.getReg();
  const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
                                     : D.getImplicitUses();
  if (!ImpR)
    return false;
  while (*ImpR)
    if (*ImpR++ == Reg)
      return true;
  return false;
}

//
// The data flow graph construction.
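// In rough outline, build() below creates block and statement nodes for the
// machine code, adds phi defs for function and landing-pad live-ins, places
// phis using iterated dominance frontiers (recordDefsForDF/buildPhis), and
// then links every reference to its reaching def in a dominator-tree walk
// (linkBlockRefs), finally dropping phis that turned out to be unused.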
//

DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
    : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi),
      LiveIns(PRI) {
}

// The implementation of the definition stack.
// Each register reference has its own definition stack. In particular,
// the references "Reg" and "Reg:subreg" will each have their own
// definition stack.

// Construct a stack iterator.
DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
      bool Top) : DS(S) {
  if (!Top) {
    // Initialize to bottom.
    Pos = 0;
    return;
  }
  // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
  Pos = DS.Stack.size();
  while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
    Pos--;
}

// Return the size of the stack, including block delimiters.
unsigned DataFlowGraph::DefStack::size() const {
  unsigned S = 0;
  for (auto I = top(), E = bottom(); I != E; I.down())
    S++;
  return S;
}

// Remove the top entry from the stack. Remove all intervening delimiters
// so that after this, the stack is either empty, or the top of the stack
// is a non-delimiter.
void DataFlowGraph::DefStack::pop() {
  assert(!empty());
  unsigned P = nextDown(Stack.size());
  Stack.resize(P);
}

// Push a delimiter for block node N on the stack.
void DataFlowGraph::DefStack::start_block(NodeId N) {
  assert(N != 0);
  Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
}

// Remove all nodes from the top of the stack, until the delimiter for
// block node N is encountered. Remove the delimiter as well. In effect,
// this will remove from the stack all definitions from block N.
void DataFlowGraph::DefStack::clear_block(NodeId N) {
  assert(N != 0);
  unsigned P = Stack.size();
  while (P > 0) {
    bool Found = isDelimiter(Stack[P-1], N);
    P--;
    if (Found)
      break;
  }
  // This will also remove the delimiter, if found.
  Stack.resize(P);
}

// Move the stack iterator up by one.
unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
  // Get the next valid position after P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  unsigned SS = Stack.size();
  bool IsDelim;
  assert(P < SS);
  do {
    P++;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P < SS && IsDelim);
  assert(!IsDelim);
  return P;
}

// Move the stack iterator down by one.
unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
  // Get the preceding valid position before P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  assert(P > 0 && P <= Stack.size());
  bool IsDelim = isDelimiter(Stack[P-1]);
  do {
    if (--P == 0)
      break;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P > 0 && IsDelim);
  assert(!IsDelim);
  return P;
}

// Register information.

RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
  RegisterSet LR;
  const Function &F = MF.getFunction();
  const Constant *PF = F.hasPersonalityFn() ?
      F.getPersonalityFn() : nullptr;
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  if (RegisterId R = TLI.getExceptionPointerRegister(PF))
    LR.insert(RegisterRef(R));
  if (!isFuncletEHPersonality(classifyEHPersonality(PF))) {
    if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
      LR.insert(RegisterRef(R));
  }
  return LR;
}

// Node management functions.

// Get the pointer to the node with the id N.
NodeBase *DataFlowGraph::ptr(NodeId N) const {
  if (N == 0)
    return nullptr;
  return Memory.ptr(N);
}

// Get the id of the node at the address P.
NodeId DataFlowGraph::id(const NodeBase *P) const {
  if (P == nullptr)
    return 0;
  return Memory.id(P);
}

// Allocate a new node and set the attributes to Attrs.
NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) {
  NodeAddr<NodeBase*> P = Memory.New();
  P.Addr->init();
  P.Addr->setAttrs(Attrs);
  return P;
}

// Make a copy of the given node B, except for the data-flow links, which
// are set to 0.
NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
  NodeAddr<NodeBase*> NA = newNode(0);
  memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
  // Ref nodes need to have the data-flow links reset.
  if (NA.Addr->getType() == NodeAttrs::Ref) {
    NodeAddr<RefNode*> RA = NA;
    RA.Addr->setReachingDef(0);
    RA.Addr->setSibling(0);
    if (NA.Addr->getKind() == NodeAttrs::Def) {
      NodeAddr<DefNode*> DA = NA;
      DA.Addr->setReachedDef(0);
      DA.Addr->setReachedUse(0);
    }
  }
  return NA;
}

// Allocation routines for specific node types/kinds.

NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
      MachineOperand &Op, uint16_t Flags) {
  NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  UA.Addr->setRegRef(&Op, *this);
  return UA;
}

NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
      RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
  NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  assert(Flags & NodeAttrs::PhiRef);
  PUA.Addr->setRegRef(RR, *this);
  PUA.Addr->setPredecessor(PredB.Id);
  return PUA;
}

NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
      MachineOperand &Op, uint16_t Flags) {
  NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  DA.Addr->setRegRef(&Op, *this);
  return DA;
}

NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
      RegisterRef RR, uint16_t Flags) {
  NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  assert(Flags & NodeAttrs::PhiRef);
  DA.Addr->setRegRef(RR, *this);
  return DA;
}

NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
  NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
  Owner.Addr->addPhi(PA, *this);
  return PA;
}

NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
      MachineInstr *MI) {
  NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
  SA.Addr->setCode(MI);
  Owner.Addr->addMember(SA, *this);
  return SA;
}

NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
      MachineBasicBlock *BB) {
  NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
  BA.Addr->setCode(BB);
  Owner.Addr->addMember(BA, *this);
  return BA;
}

NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
  NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
  FA.Addr->setCode(MF);
  return FA;
}

// Build the data flow graph.
void DataFlowGraph::build(unsigned Options) {
  reset();
  Func = newFunc(&MF);

  if (MF.empty())
    return;

  for (MachineBasicBlock &B : MF) {
    NodeAddr<BlockNode*> BA = newBlock(Func, &B);
    BlockNodes.insert(std::make_pair(&B, BA));
    for (MachineInstr &I : B) {
      if (I.isDebugInstr())
        continue;
      buildStmt(BA, I);
    }
  }

  NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
  NodeList Blocks = Func.Addr->members(*this);

  // Collect information about block references.
  RegisterSet AllRefs;
  for (NodeAddr<BlockNode*> BA : Blocks)
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
      for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
        AllRefs.insert(RA.Addr->getRegRef(*this));

  // Collect function live-ins and entry block live-ins.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineBasicBlock &EntryB = *EA.Addr->getCode();
  assert(EntryB.pred_empty() && "Function entry block has predecessors");
  for (std::pair<unsigned,unsigned> P : MRI.liveins())
    LiveIns.insert(RegisterRef(P.first));
  if (MRI.tracksLiveness()) {
    for (auto I : EntryB.liveins())
      LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask));
  }

  // Add function-entry phi nodes for the live-in registers.
  //for (std::pair<RegisterId,LaneBitmask> P : LiveIns) {
  for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I) {
    RegisterRef RR = *I;
    NodeAddr<PhiNode*> PA = newPhi(EA);
    uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
    NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
    PA.Addr->addMember(DA, *this);
  }

  // Add phis for landing pads.
  // Landing pads, unlike usual basic blocks, are not entered through
  // branches in the program, or fall-throughs from other blocks. They
  // are entered from the exception handling runtime and the target's ABI
  // may define certain registers as defined on entry to such a block.
  RegisterSet EHRegs = getLandingPadLiveIns();
  if (!EHRegs.empty()) {
    for (NodeAddr<BlockNode*> BA : Blocks) {
      const MachineBasicBlock &B = *BA.Addr->getCode();
      if (!B.isEHPad())
        continue;

      // Prepare a list of NodeIds of the block's predecessors.
      NodeList Preds;
      for (MachineBasicBlock *PB : B.predecessors())
        Preds.push_back(findBlock(PB));

      // Build phi nodes for each live-in.
      for (RegisterRef RR : EHRegs) {
        NodeAddr<PhiNode*> PA = newPhi(BA);
        uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
        // Add def:
        NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
        PA.Addr->addMember(DA, *this);
        // Add uses (no reaching defs for phi uses):
        for (NodeAddr<BlockNode*> PBA : Preds) {
          NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
          PA.Addr->addMember(PUA, *this);
        }
      }
    }
  }

  // Build a map "PhiM" which will contain, for each block, the set
  // of references that will require phi definitions in that block.
  BlockRefsMap PhiM;
  for (NodeAddr<BlockNode*> BA : Blocks)
    recordDefsForDF(PhiM, BA);
  for (NodeAddr<BlockNode*> BA : Blocks)
    buildPhis(PhiM, AllRefs, BA);

  // Link all the refs.
  // This will recursively traverse the dominator tree.
  DefStackMap DM;
  linkBlockRefs(DM, EA);

  // Finally, remove all unused phi nodes.
  if (!(Options & BuildOptions::KeepDeadPhis))
    removeUnusedPhis();
}

RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
  assert(PhysicalRegisterInfo::isRegMaskId(Reg) ||
         Register::isPhysicalRegister(Reg));
  assert(Reg != 0);
  if (Sub != 0)
    Reg = TRI.getSubReg(Reg, Sub);
  return RegisterRef(Reg);
}

RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const {
  assert(Op.isReg() || Op.isRegMask());
  if (Op.isReg())
    return makeRegRef(Op.getReg(), Op.getSubReg());
  return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll());
}

RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const {
  if (AR.Reg == BR.Reg) {
    LaneBitmask M = AR.Mask & BR.Mask;
    return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef();
  }
  // This isn't strictly correct, because the overlap may happen in the
  // part masked out.
  if (PRI.alias(AR, BR))
    return AR;
  return RegisterRef();
}

// For each stack in the map DefM, push the delimiter for block B on it.
void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
  // Push block delimiters.
  for (auto &P : DefM)
    P.second.start_block(B);
}

// Remove all definitions coming from block B from each stack in DefM.
void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
  // Pop all defs from this block from the definition stack. Defs that were
  // added to the map during the traversal of instructions will not have a
  // delimiter, but for those, the whole stack will be emptied.
  for (auto &P : DefM)
    P.second.clear_block(B);

  // Finally, remove empty stacks from the map.
  for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
    NextI = std::next(I);
    // This preserves the validity of iterators other than I.
    if (I->second.empty())
      DefM.erase(I);
  }
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  pushClobbers(IA, DefM);
  pushDefs(IA, DefM);
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
  std::set<RegisterId> Defined;

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.
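  // For example, on a target where W0 aliases X0, a clobbering def of W0 is
  // pushed on the stack for W0 and also on the stack for X0, so that later
  // references to either register can find it. (The register names here are
  // only illustrative.)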

  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    if (Visited.count(DA.Id))
      continue;
    if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering))
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);

    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkRefUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    Defined.insert(RR.Reg);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      if (!Defined.count(A))
        DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
#ifndef NDEBUG
  std::set<RegisterId> Defined;
#endif

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.

  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    if (Visited.count(DA.Id))
      continue;
    if (DA.Addr->getFlags() & NodeAttrs::Clobbering)
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Assert if the register is defined in two or more unrelated defs.
    // This could happen if there are two or more def operands defining it.
    if (!Defined.insert(RR.Reg).second) {
      MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
      dbgs() << "Multiple definitions of register: "
             << Print<RegisterRef>(RR, *this) << " in\n  " << *MI << "in "
             << printMBBReference(*MI->getParent()) << '\n';
      llvm_unreachable(nullptr);
    }
#endif
    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkRefUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}

// Return the list of all reference nodes related to RA, including RA itself.
// See "getNextRelated" for the meaning of a "related reference".
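// For a statement this typically means the def/use created for a machine
// operand together with any shadow copies of it added while linking it to
// multiple reaching defs (see getNextShadow below).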
NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  NodeList Refs;
  NodeId Start = RA.Id;
  do {
    Refs.push_back(RA);
    RA = getNextRelated(IA, RA);
  } while (RA.Id != 0 && RA.Id != Start);
  return Refs;
}

// Clear all information in the graph.
void DataFlowGraph::reset() {
  Memory.clear();
  BlockNodes.clear();
  Func = NodeAddr<FuncNode*>();
}

// Return the next reference node in the instruction node IA that is related
// to RA. Conceptually, two reference nodes are related if they refer to the
// same instance of a register access, but differ in flags or other minor
// characteristics. Specific examples of related nodes are shadow reference
// nodes.
// Return the equivalent of nullptr if there are no more related references.
NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
    if (TA.Addr->getKind() != RA.Addr->getKind())
      return false;
    if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
      return false;
    return true;
  };
  auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    return Related(TA) &&
           &RA.Addr->getOp() == &TA.Addr->getOp();
  };
  auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    if (!Related(TA))
      return false;
    if (TA.Addr->getKind() != NodeAttrs::Use)
      return true;
    // For phi uses, compare predecessor blocks.
    const NodeAddr<const PhiUseNode*> TUA = TA;
    const NodeAddr<const PhiUseNode*> RUA = RA;
    return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
  };

  RegisterRef RR = RA.Addr->getRegRef(*this);
  if (IA.Addr->getKind() == NodeAttrs::Stmt)
    return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
  return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
}

// Find the next node related to RA in IA that satisfies condition P.
// If such a node was found, return a pair where the second element is the
// located node. If such a node does not exist, return a pair where the
// first element is the element after which such a node should be inserted,
// and the second element is a null-address.
template <typename Predicate>
std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>>
DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA,
      Predicate P) const {
  assert(IA.Id != 0 && RA.Id != 0);

  NodeAddr<RefNode*> NA;
  NodeId Start = RA.Id;
  while (true) {
    NA = getNextRelated(IA, RA);
    if (NA.Id == 0 || NA.Id == Start)
      break;
    if (P(NA))
      break;
    RA = NA;
  }

  if (NA.Id != 0 && NA.Id != Start)
    return std::make_pair(RA, NA);
  return std::make_pair(RA, NodeAddr<RefNode*>());
}

// Get the next shadow node in IA corresponding to RA, and optionally create
// such a node if it does not exist.
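// Shadows come into play when a single reference needs more than one
// reaching def, e.g. when defs of disjoint lanes (sub-registers) together
// cover the reference: each extra reaching def is attached to a shadow copy
// of the original reference (see linkRefUp).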
NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA, bool Create) {
  assert(IA.Id != 0 && RA.Id != 0);

  uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getFlags() == Flags;
  };
  auto Loc = locateNextRef(IA, RA, IsShadow);
  if (Loc.second.Id != 0 || !Create)
    return Loc.second;

  // Create a copy of RA and mark it as a shadow.
  NodeAddr<RefNode*> NA = cloneNode(RA);
  NA.Addr->setFlags(Flags | NodeAttrs::Shadow);
  IA.Addr->addMemberAfter(Loc.first, NA, *this);
  return NA;
}

// Get the next shadow node in IA corresponding to RA. Return null-address
// if such a node does not exist.
NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);
  uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getFlags() == Flags;
  };
  return locateNextRef(IA, RA, IsShadow).second;
}

// Create a new statement node in the block node BA that corresponds to
// the machine instruction MI.
void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
  NodeAddr<StmtNode*> SA = newStmt(BA, &In);

  auto isCall = [] (const MachineInstr &In) -> bool {
    if (In.isCall())
      return true;
    // Is this a tail call?
    if (In.isBranch()) {
      for (const MachineOperand &Op : In.operands())
        if (Op.isGlobal() || Op.isSymbol())
          return true;
      // Assume indirect branches are calls. This is for the purpose of
      // keeping implicit operands, and so it won't hurt on intra-function
      // indirect branches.
      if (In.isIndirectBranch())
        return true;
    }
    return false;
  };

  auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool {
    // This instruction defines DR. Check if there is a use operand that
    // would make DR live on entry to the instruction.
    for (const MachineOperand &Op : In.operands()) {
      if (!Op.isReg() || Op.getReg() == 0 || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef UR = makeRegRef(Op);
      if (PRI.alias(DR, UR))
        return false;
    }
    return true;
  };

  bool IsCall = isCall(In);
  unsigned NumOps = In.getNumOperands();

  // Avoid duplicate implicit defs. This will not detect cases of implicit
  // defs that define registers that overlap, but it is not clear how to
  // interpret that in the absence of explicit defs. Overlapping explicit
  // defs are likely illegal already.
  BitVector DoneDefs(TRI.getNumRegs());
  // Process explicit defs first.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R))
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
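      // (For example, a predicated update that does not also read the
      // register leaves nothing that would keep the old value live, so the
      // preserved part is treated as undefined.)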
      if (isDefUndef(In, makeRegRef(Op)))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead())
      Flags |= NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    assert(!DoneDefs.test(R));
    DoneDefs.set(R);
  }

  // Process reg-masks (as clobbers).
  BitVector DoneClobbers(TRI.getNumRegs());
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isRegMask())
      continue;
    uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed |
                     NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    // Record all clobbered registers in DoneDefs.
    const uint32_t *RM = Op.getRegMask();
    for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i)
      if (!(RM[i/32] & (1u << (i%32))))
        DoneClobbers.set(i);
  }

  // Process implicit defs, skipping those that have already been added
  // as explicit.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R) || DoneDefs.test(R))
      continue;
    RegisterRef RR = makeRegRef(Op);
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, RR))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead()) {
      if (DoneClobbers.test(R))
        continue;
      Flags |= NodeAttrs::Dead;
    }
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    DoneDefs.set(R);
  }

  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R))
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (Op.isUndef())
      Flags |= NodeAttrs::Undef;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    NodeAddr<UseNode*> UA = newUse(SA, Op, Flags);
    SA.Addr->addMember(UA, *this);
  }
}

// Scan all defs in the block node BA and record in PhiM the locations of
// phi nodes corresponding to these defs.
void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM,
      NodeAddr<BlockNode*> BA) {
  // Check all defs from block BA and record them in each block in BA's
  // iterated dominance frontier. This information will later be used to
  // create phi nodes.
  MachineBasicBlock *BB = BA.Addr->getCode();
  assert(BB);
  auto DFLoc = MDF.find(BB);
  if (DFLoc == MDF.end() || DFLoc->second.empty())
    return;

  // Traverse all instructions in the block and collect the set of all
  // defined references. For each reference there will be a phi created
  // in the block's iterated dominance frontier.
  // This is done to make sure that each defined reference gets only one
  // phi node, even if it is defined multiple times.
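  // (This is the usual SSA placement step: conceptually, a def of R in this
  // block requests a phi for R in every block of BB's iterated dominance
  // frontier, which is computed below by expanding the DF set to a fixed
  // point.)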
  RegisterSet Defs;
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
    for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
      Defs.insert(RA.Addr->getRegRef(*this));

  // Calculate the iterated dominance frontier of BB.
  const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
  SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
  for (unsigned i = 0; i < IDF.size(); ++i) {
    auto F = MDF.find(IDF[i]);
    if (F != MDF.end())
      IDF.insert(F->second.begin(), F->second.end());
  }

  // Finally, add the set of defs to each block in the iterated dominance
  // frontier.
  for (auto DB : IDF) {
    NodeAddr<BlockNode*> DBA = findBlock(DB);
    PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
  }
}

// Given the locations of phi nodes in the map PhiM, create the phi nodes
// that are located in the block node BA.
void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
      NodeAddr<BlockNode*> BA) {
  // Check if this block has any DF defs, i.e. if there are any defs
  // that this block is in the iterated dominance frontier of.
  auto HasDF = PhiM.find(BA.Id);
  if (HasDF == PhiM.end() || HasDF->second.empty())
    return;

  // First, remove all R in Refs such that there exists T in Refs
  // such that T covers R. In other words, only leave those refs that
  // are not covered by another ref (i.e. maximal with respect to covering).

  auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
    for (RegisterRef I : RRs)
      if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI))
        RR = I;
    return RR;
  };

  RegisterSet MaxDF;
  for (RegisterRef I : HasDF->second)
    MaxDF.insert(MaxCoverIn(I, HasDF->second));

  std::vector<RegisterRef> MaxRefs;
  for (RegisterRef I : MaxDF)
    MaxRefs.push_back(MaxCoverIn(I, AllRefs));

  // Now, for each R in MaxRefs, get the alias closure of R. If the closure
  // only has R in it, create a phi with a single def for R. Otherwise,
  // create a phi, and add a def for each S in the closure.

  // Sort the refs so that the phis will be created in a deterministic order.
  llvm::sort(MaxRefs);
  // Remove duplicates.
  auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
  MaxRefs.erase(NewEnd, MaxRefs.end());

  auto Aliased = [this,&MaxRefs](RegisterRef RR,
                                 std::vector<unsigned> &Closure) -> bool {
    for (unsigned I : Closure)
      if (PRI.alias(RR, MaxRefs[I]))
        return true;
    return false;
  };

  // Prepare a list of NodeIds of the block's predecessors.
  NodeList Preds;
  const MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *PB : MBB->predecessors())
    Preds.push_back(findBlock(PB));

  while (!MaxRefs.empty()) {
    // Put the first element in the closure, and then add all subsequent
    // elements from MaxRefs to it, if they alias at least one element
    // already in the closure.
    // ClosureIdx: vector of indices in MaxRefs of members of the closure.
    std::vector<unsigned> ClosureIdx = { 0 };
    for (unsigned i = 1; i != MaxRefs.size(); ++i)
      if (Aliased(MaxRefs[i], ClosureIdx))
        ClosureIdx.push_back(i);

    // Build a phi for the closure.
    unsigned CS = ClosureIdx.size();
    NodeAddr<PhiNode*> PA = newPhi(BA);

    // Add defs.
    for (unsigned X = 0; X != CS; ++X) {
      RegisterRef RR = MaxRefs[ClosureIdx[X]];
      uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
      NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
      PA.Addr->addMember(DA, *this);
    }
    // Add phi uses.
    for (NodeAddr<BlockNode*> PBA : Preds) {
      for (unsigned X = 0; X != CS; ++X) {
        RegisterRef RR = MaxRefs[ClosureIdx[X]];
        NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
        PA.Addr->addMember(PUA, *this);
      }
    }

    // Erase from MaxRefs all elements in the closure.
    auto Begin = MaxRefs.begin();
    for (unsigned Idx : llvm::reverse(ClosureIdx))
      MaxRefs.erase(Begin + Idx);
  }
}

// Remove any unneeded phi nodes that were created during the build process.
void DataFlowGraph::removeUnusedPhis() {
  // This will remove unused phis, i.e. phis where each def does not reach
  // any uses or other defs. This will not detect or remove circular phi
  // chains that are otherwise dead. Unused/dead phis are created during
  // the build process and this function is intended to remove these cases
  // that are easily determinable to be unnecessary.

  SetVector<NodeId> PhiQ;
  for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
    for (auto P : BA.Addr->members_if(IsPhi, *this))
      PhiQ.insert(P.Id);
  }

  static auto HasUsedDef = [](NodeList &Ms) -> bool {
    for (NodeAddr<NodeBase*> M : Ms) {
      if (M.Addr->getKind() != NodeAttrs::Def)
        continue;
      NodeAddr<DefNode*> DA = M;
      if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
        return true;
    }
    return false;
  };

  // Any phi, if it is removed, may affect other phis (make them dead).
  // For each removed phi, collect the potentially affected phis and add
  // them back to the queue.
  while (!PhiQ.empty()) {
    auto PA = addr<PhiNode*>(PhiQ[0]);
    PhiQ.remove(PA.Id);
    NodeList Refs = PA.Addr->members(*this);
    if (HasUsedDef(Refs))
      continue;
    for (NodeAddr<RefNode*> RA : Refs) {
      if (NodeId RD = RA.Addr->getReachingDef()) {
        auto RDA = addr<DefNode*>(RD);
        NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
        if (IsPhi(OA))
          PhiQ.insert(OA.Id);
      }
      if (RA.Addr->isDef())
        unlinkDef(RA, true);
      else
        unlinkUse(RA, true);
    }
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
    BA.Addr->removeMember(PA, *this);
  }
}

// For a given reference node TA in an instruction node IA, connect the
// reaching def of TA to the appropriate def node. Create any shadow nodes
// as appropriate.
template <typename T>
void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
      DefStack &DS) {
  if (DS.empty())
    return;
  RegisterRef RR = TA.Addr->getRegRef(*this);
  NodeAddr<T> TAP;

  // References from the def stack that have been examined so far.
  RegisterAggr Defs(PRI);

  for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
    RegisterRef QR = I->Addr->getRegRef(*this);

    // Skip all defs that are aliased to any of the defs that we have already
    // seen. If this completes a cover of RR, stop the stack traversal.
    bool Alias = Defs.hasAliasOf(QR);
    bool Cover = Defs.insert(QR).hasCoverOf(RR);
    if (Alias) {
      if (Cover)
        break;
      continue;
    }

    // The reaching def.
    NodeAddr<DefNode*> RDA = *I;

    // Pick the reached node.
    if (TAP.Id == 0) {
      TAP = TA;
    } else {
      // Mark the existing ref as "shadow" and create a new shadow.
      TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
      TAP = getNextShadow(IA, TAP, true);
    }

    // Create the link.
    TAP.Addr->linkToDef(TAP.Id, RDA);

    if (Cover)
      break;
  }
}

// Create data-flow links for all reference nodes in the statement node SA.
template <typename Predicate>
void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA,
      Predicate P) {
#ifndef NDEBUG
  RegisterSet Defs;
#endif

  // Link all nodes (upwards in the data-flow) with their reaching defs.
  for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) {
    uint16_t Kind = RA.Addr->getKind();
    assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
    RegisterRef RR = RA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Do not expect multiple defs of the same reference.
    assert(Kind != NodeAttrs::Def || !Defs.count(RR));
    Defs.insert(RR);
#endif

    auto F = DefM.find(RR.Reg);
    if (F == DefM.end())
      continue;
    DefStack &DS = F->second;
    if (Kind == NodeAttrs::Use)
      linkRefUp<UseNode*>(SA, RA, DS);
    else if (Kind == NodeAttrs::Def)
      linkRefUp<DefNode*>(SA, RA, DS);
    else
      llvm_unreachable("Unexpected node in instruction");
  }
}

// Create data-flow links for all instructions in the block node BA. This
// will include updating any phi nodes in BA.
void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
  // Push block delimiters.
  markBlock(BA.Id, DefM);

  auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };
  auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };

  assert(BA.Addr && "block node address is needed to create a data-flow link");
  // For each non-phi instruction in the block, link all the defs and uses
  // to their reaching defs. For any member of the block (including phis),
  // push the defs on the corresponding stacks.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
    // Ignore phi nodes here. They will be linked part by part from the
    // predecessors.
    if (IA.Addr->getKind() == NodeAttrs::Stmt) {
      linkStmtRefs(DefM, IA, IsUse);
      linkStmtRefs(DefM, IA, IsClobber);
    }

    // Push the definitions on the stack.
    pushClobbers(IA, DefM);

    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      linkStmtRefs(DefM, IA, IsNoClobber);

    pushDefs(IA, DefM);
  }

  // Recursively process all children in the dominator tree.
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  for (auto I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    linkBlockRefs(DefM, SBA);
  }

  // Link the phi uses from the successor blocks.
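  // Each phi use in a successor's phi records the predecessor block it
  // corresponds to; only the uses whose recorded predecessor is this block
  // (BA) are linked here, using the defs currently on this block's stacks.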
  auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
    if (NA.Addr->getKind() != NodeAttrs::Use)
      return false;
    assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
    NodeAddr<PhiUseNode*> PUA = NA;
    return PUA.Addr->getPredecessor() == BA.Id;
  };

  RegisterSet EHLiveIns = getLandingPadLiveIns();
  MachineBasicBlock *MBB = BA.Addr->getCode();

  for (MachineBasicBlock *SB : MBB->successors()) {
    bool IsEHPad = SB->isEHPad();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
      // Do not link phi uses for landing pad live-ins.
      if (IsEHPad) {
        // Find what register this phi is for.
        NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
        assert(RA.Id != 0);
        if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
          continue;
      }
      // Go over each phi use associated with MBB, and link it.
      for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
        NodeAddr<PhiUseNode*> PUA = U;
        RegisterRef RR = PUA.Addr->getRegRef(*this);
        linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
      }
    }
  }

  // Pop all defs from this block from the definition stacks.
  releaseBlock(BA.Id, DefM);
}

// Remove the use node UA from any data-flow and structural links.
void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
  NodeId RD = UA.Addr->getReachingDef();
  NodeId Sib = UA.Addr->getSibling();

  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
  if (TA.Id == UA.Id) {
    RDA.Addr->setReachedUse(Sib);
    return;
  }

  while (TA.Id != 0) {
    NodeId S = TA.Addr->getSibling();
    if (S == UA.Id) {
      TA.Addr->setSibling(UA.Addr->getSibling());
      return;
    }
    TA = addr<UseNode*>(S);
  }
}

// Remove the def node DA from any data-flow and structural links.
void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
  //
  //         RD
  //         | reached
  //         | def
  //         :
  //         .
  //        +----+
  // ... -- | DA | -- ... -- 0  : sibling chain of DA
  //        +----+
  //         |  | reached
  //         |  : def
  //         |  .
  //         | ...  : Siblings (defs)
  //         |
  //         : reached
  //         . use
  //        ... : sibling chain of reached uses

  NodeId RD = DA.Addr->getReachingDef();

  // Visit all siblings of the reached def and reset their reaching defs.
  // Also, defs reached by DA are now "promoted" to being reached by RD,
  // so all of them will need to be spliced into the sibling chain where
  // DA belongs.
  auto getAllNodes = [this] (NodeId N) -> NodeList {
    NodeList Res;
    while (N) {
      auto RA = addr<RefNode*>(N);
      // Keep the nodes in the exact sibling order.
      Res.push_back(RA);
      N = RA.Addr->getSibling();
    }
    return Res;
  };
  NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
  NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());

  if (RD == 0) {
    for (NodeAddr<RefNode*> I : ReachedDefs)
      I.Addr->setSibling(0);
    for (NodeAddr<RefNode*> I : ReachedUses)
      I.Addr->setSibling(0);
  }
  for (NodeAddr<DefNode*> I : ReachedDefs)
    I.Addr->setReachingDef(RD);
  for (NodeAddr<UseNode*> I : ReachedUses)
    I.Addr->setReachingDef(RD);

  NodeId Sib = DA.Addr->getSibling();
  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  // Update the reaching def node and remove DA from the sibling list.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
  if (TA.Id == DA.Id) {
    // If DA is the first reached def, just update the RD's reached def
    // to the DA's sibling.
    RDA.Addr->setReachedDef(Sib);
  } else {
    // Otherwise, traverse the sibling list of the reached defs and remove
    // DA from it.
    while (TA.Id != 0) {
      NodeId S = TA.Addr->getSibling();
      if (S == DA.Id) {
        TA.Addr->setSibling(Sib);
        break;
      }
      TA = addr<DefNode*>(S);
    }
  }

  // Splice the DA's reached defs into the RDA's reached def chain.
  if (!ReachedDefs.empty()) {
    auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
    Last.Addr->setSibling(RDA.Addr->getReachedDef());
    RDA.Addr->setReachedDef(ReachedDefs.front().Id);
  }
  // Splice the DA's reached uses into the RDA's reached use chain.
  if (!ReachedUses.empty()) {
    auto Last = NodeAddr<UseNode*>(ReachedUses.back());
    Last.Addr->setSibling(RDA.Addr->getReachedUse());
    RDA.Addr->setReachedUse(ReachedUses.front().Id);
  }
}