1 //===- VarLocBasedImpl.cpp - Tracking Debug Value MIs with VarLoc class----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// 9 /// \file VarLocBasedImpl.cpp 10 /// 11 /// LiveDebugValues is an optimistic "available expressions" dataflow 12 /// algorithm. The set of expressions is the set of machine locations 13 /// (registers, spill slots, constants) that a variable fragment might be 14 /// located, qualified by a DIExpression and indirect-ness flag, while each 15 /// variable is identified by a DebugVariable object. The availability of an 16 /// expression begins when a DBG_VALUE instruction specifies the location of a 17 /// DebugVariable, and continues until that location is clobbered or 18 /// re-specified by a different DBG_VALUE for the same DebugVariable. 19 /// 20 /// The output of LiveDebugValues is additional DBG_VALUE instructions, 21 /// placed to extend variable locations as far they're available. This file 22 /// and the VarLocBasedLDV class is an implementation that explicitly tracks 23 /// locations, using the VarLoc class. 24 /// 25 /// The canonical "available expressions" problem doesn't have expression 26 /// clobbering, instead when a variable is re-assigned, any expressions using 27 /// that variable get invalidated. LiveDebugValues can map onto "available 28 /// expressions" by having every register represented by a variable, which is 29 /// used in an expression that becomes available at a DBG_VALUE instruction. 30 /// When the register is clobbered, its variable is effectively reassigned, and 31 /// expressions computed from it become unavailable. A similar construct is 32 /// needed when a DebugVariable has its location re-specified, to invalidate 33 /// all other locations for that DebugVariable. 34 /// 35 /// Using the dataflow analysis to compute the available expressions, we create 36 /// a DBG_VALUE at the beginning of each block where the expression is 37 /// live-in. This propagates variable locations into every basic block where 38 /// the location can be determined, rather than only having DBG_VALUEs in blocks 39 /// where locations are specified due to an assignment or some optimization. 40 /// Movements of values between registers and spill slots are annotated with 41 /// DBG_VALUEs too to track variable values bewteen locations. All this allows 42 /// DbgEntityHistoryCalculator to focus on only the locations within individual 43 /// blocks, facilitating testing and improving modularity. 44 /// 45 /// We follow an optimisic dataflow approach, with this lattice: 46 /// 47 /// \verbatim 48 /// ┬ "Unknown" 49 /// | 50 /// v 51 /// True 52 /// | 53 /// v 54 /// ⊥ False 55 /// \endverbatim With "True" signifying that the expression is available (and 56 /// thus a DebugVariable's location is the corresponding register), while 57 /// "False" signifies that the expression is unavailable. "Unknown"s never 58 /// survive to the end of the analysis (see below). 59 /// 60 /// Formally, all DebugVariable locations that are live-out of a block are 61 /// initialized to \top. A blocks live-in values take the meet of the lattice 62 /// value for every predecessors live-outs, except for the entry block, where 63 /// all live-ins are \bot. 
The usual dataflow propagation occurs: the transfer 64 /// function for a block assigns an expression for a DebugVariable to be "True" 65 /// if a DBG_VALUE in the block specifies it; "False" if the location is 66 /// clobbered; or the live-in value if it is unaffected by the block. We 67 /// visit each block in reverse post order until a fixedpoint is reached. The 68 /// solution produced is maximal. 69 /// 70 /// Intuitively, we start by assuming that every expression / variable location 71 /// is at least "True", and then propagate "False" from the entry block and any 72 /// clobbers until there are no more changes to make. This gives us an accurate 73 /// solution because all incorrect locations will have a "False" propagated into 74 /// them. It also gives us a solution that copes well with loops by assuming 75 /// that variable locations are live-through every loop, and then removing those 76 /// that are not through dataflow. 77 /// 78 /// Within LiveDebugValues: each variable location is represented by a 79 /// VarLoc object that identifies the source variable, the set of 80 /// machine-locations that currently describe it (a single location for 81 /// DBG_VALUE or multiple for DBG_VALUE_LIST), and the DBG_VALUE inst that 82 /// specifies the location. Each VarLoc is indexed in the (function-scope) \p 83 /// VarLocMap, giving each VarLoc a set of unique indexes, each of which 84 /// corresponds to one of the VarLoc's machine-locations and can be used to 85 /// lookup the VarLoc in the VarLocMap. Rather than operate directly on machine 86 /// locations, the dataflow analysis in this pass identifies locations by their 87 /// indices in the VarLocMap, meaning all the variable locations in a block can 88 /// be described by a sparse vector of VarLocMap indicies. 89 /// 90 /// All the storage for the dataflow analysis is local to the ExtendRanges 91 /// method and passed down to helper methods. "OutLocs" and "InLocs" record the 92 /// in and out lattice values for each block. "OpenRanges" maintains a list of 93 /// variable locations and, with the "process" method, evaluates the transfer 94 /// function of each block. "flushPendingLocs" installs debug value instructions 95 /// for each live-in location at the start of blocks, while "Transfers" records 96 /// transfers of values between machine-locations. 97 /// 98 /// We avoid explicitly representing the "Unknown" (\top) lattice value in the 99 /// implementation. Instead, unvisited blocks implicitly have all lattice 100 /// values set as "Unknown". After being visited, there will be path back to 101 /// the entry block where the lattice value is "False", and as the transfer 102 /// function cannot make new "Unknown" locations, there are no scenarios where 103 /// a block can have an "Unknown" location after being visited. Similarly, we 104 /// don't enumerate all possible variable locations before exploring the 105 /// function: when a new location is discovered, all blocks previously explored 106 /// were implicitly "False" but unrecorded, and become explicitly "False" when 107 /// a new VarLoc is created with its bit not set in predecessor InLocs or 108 /// OutLocs. 
109 /// 110 //===----------------------------------------------------------------------===// 111 112 #include "LiveDebugValues.h" 113 114 #include "llvm/ADT/CoalescingBitVector.h" 115 #include "llvm/ADT/DenseMap.h" 116 #include "llvm/ADT/PostOrderIterator.h" 117 #include "llvm/ADT/SmallPtrSet.h" 118 #include "llvm/ADT/SmallSet.h" 119 #include "llvm/ADT/SmallVector.h" 120 #include "llvm/ADT/Statistic.h" 121 #include "llvm/ADT/UniqueVector.h" 122 #include "llvm/CodeGen/LexicalScopes.h" 123 #include "llvm/CodeGen/MachineBasicBlock.h" 124 #include "llvm/CodeGen/MachineFrameInfo.h" 125 #include "llvm/CodeGen/MachineFunction.h" 126 #include "llvm/CodeGen/MachineFunctionPass.h" 127 #include "llvm/CodeGen/MachineInstr.h" 128 #include "llvm/CodeGen/MachineInstrBuilder.h" 129 #include "llvm/CodeGen/MachineMemOperand.h" 130 #include "llvm/CodeGen/MachineOperand.h" 131 #include "llvm/CodeGen/PseudoSourceValue.h" 132 #include "llvm/CodeGen/RegisterScavenging.h" 133 #include "llvm/CodeGen/TargetFrameLowering.h" 134 #include "llvm/CodeGen/TargetInstrInfo.h" 135 #include "llvm/CodeGen/TargetLowering.h" 136 #include "llvm/CodeGen/TargetPassConfig.h" 137 #include "llvm/CodeGen/TargetRegisterInfo.h" 138 #include "llvm/CodeGen/TargetSubtargetInfo.h" 139 #include "llvm/Config/llvm-config.h" 140 #include "llvm/IR/DIBuilder.h" 141 #include "llvm/IR/DebugInfoMetadata.h" 142 #include "llvm/IR/DebugLoc.h" 143 #include "llvm/IR/Function.h" 144 #include "llvm/IR/Module.h" 145 #include "llvm/InitializePasses.h" 146 #include "llvm/MC/MCRegisterInfo.h" 147 #include "llvm/Pass.h" 148 #include "llvm/Support/Casting.h" 149 #include "llvm/Support/Compiler.h" 150 #include "llvm/Support/Debug.h" 151 #include "llvm/Support/TypeSize.h" 152 #include "llvm/Support/raw_ostream.h" 153 #include "llvm/Target/TargetMachine.h" 154 #include <algorithm> 155 #include <cassert> 156 #include <cstdint> 157 #include <functional> 158 #include <queue> 159 #include <tuple> 160 #include <utility> 161 #include <vector> 162 163 using namespace llvm; 164 165 #define DEBUG_TYPE "livedebugvalues" 166 167 STATISTIC(NumInserted, "Number of DBG_VALUE instructions inserted"); 168 169 /// If \p Op is a stack or frame register return true, otherwise return false. 170 /// This is used to avoid basing the debug entry values on the registers, since 171 /// we do not support it at the moment. 172 static bool isRegOtherThanSPAndFP(const MachineOperand &Op, 173 const MachineInstr &MI, 174 const TargetRegisterInfo *TRI) { 175 if (!Op.isReg()) 176 return false; 177 178 const MachineFunction *MF = MI.getParent()->getParent(); 179 const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); 180 Register SP = TLI->getStackPointerRegisterToSaveRestore(); 181 Register FP = TRI->getFrameRegister(*MF); 182 Register Reg = Op.getReg(); 183 184 return Reg && Reg != SP && Reg != FP; 185 } 186 187 namespace { 188 189 // Max out the number of statically allocated elements in DefinedRegsSet, as 190 // this prevents fallback to std::set::count() operations. 191 using DefinedRegsSet = SmallSet<Register, 32>; 192 193 // The IDs in this set correspond to MachineLocs in VarLocs, as well as VarLocs 194 // that represent Entry Values; every VarLoc in the set will also appear 195 // exactly once at Location=0. 196 // As a result, each VarLoc may appear more than once in this "set", but each 197 // range corresponding to a Reg, SpillLoc, or EntryValue type will still be a 198 // "true" set (i.e. 
each VarLoc may appear only once), and the range Location=0 199 // is the set of all VarLocs. 200 using VarLocSet = CoalescingBitVector<uint64_t>; 201 202 /// A type-checked pair of {Register Location (or 0), Index}, used to index 203 /// into a \ref VarLocMap. This can be efficiently converted to a 64-bit int 204 /// for insertion into a \ref VarLocSet, and efficiently converted back. The 205 /// type-checker helps ensure that the conversions aren't lossy. 206 /// 207 /// Why encode a location /into/ the VarLocMap index? This makes it possible 208 /// to find the open VarLocs killed by a register def very quickly. This is a 209 /// performance-critical operation for LiveDebugValues. 210 struct LocIndex { 211 using u32_location_t = uint32_t; 212 using u32_index_t = uint32_t; 213 214 u32_location_t Location; // Physical registers live in the range [1;2^30) (see 215 // \ref MCRegister), so we have plenty of range left 216 // here to encode non-register locations. 217 u32_index_t Index; 218 219 /// The location that has an entry for every VarLoc in the map. 220 static constexpr u32_location_t kUniversalLocation = 0; 221 222 /// The first location that is reserved for VarLocs with locations of kind 223 /// RegisterKind. 224 static constexpr u32_location_t kFirstRegLocation = 1; 225 226 /// The first location greater than 0 that is not reserved for VarLocs with 227 /// locations of kind RegisterKind. 228 static constexpr u32_location_t kFirstInvalidRegLocation = 1 << 30; 229 230 /// A special location reserved for VarLocs with locations of kind 231 /// SpillLocKind. 232 static constexpr u32_location_t kSpillLocation = kFirstInvalidRegLocation; 233 234 /// A special location reserved for VarLocs of kind EntryValueBackupKind and 235 /// EntryValueCopyBackupKind. 236 static constexpr u32_location_t kEntryValueBackupLocation = 237 kFirstInvalidRegLocation + 1; 238 239 LocIndex(u32_location_t Location, u32_index_t Index) 240 : Location(Location), Index(Index) {} 241 242 uint64_t getAsRawInteger() const { 243 return (static_cast<uint64_t>(Location) << 32) | Index; 244 } 245 246 template<typename IntT> static LocIndex fromRawInteger(IntT ID) { 247 static_assert(std::is_unsigned<IntT>::value && 248 sizeof(ID) == sizeof(uint64_t), 249 "Cannot convert raw integer to LocIndex"); 250 return {static_cast<u32_location_t>(ID >> 32), 251 static_cast<u32_index_t>(ID)}; 252 } 253 254 /// Get the start of the interval reserved for VarLocs of kind RegisterKind 255 /// which reside in \p Reg. The end is at rawIndexForReg(Reg+1)-1. 256 static uint64_t rawIndexForReg(Register Reg) { 257 return LocIndex(Reg, 0).getAsRawInteger(); 258 } 259 260 /// Return a range covering all set indices in the interval reserved for 261 /// \p Location in \p Set. 262 static auto indexRangeForLocation(const VarLocSet &Set, 263 u32_location_t Location) { 264 uint64_t Start = LocIndex(Location, 0).getAsRawInteger(); 265 uint64_t End = LocIndex(Location + 1, 0).getAsRawInteger(); 266 return Set.half_open_range(Start, End); 267 } 268 }; 269 270 // Simple Set for storing all the VarLoc Indices at a Location bucket. 271 using VarLocsInRange = SmallSet<LocIndex::u32_index_t, 32>; 272 // Vector of all `LocIndex`s for a given VarLoc; the same Location should not 273 // appear in any two of these, as each VarLoc appears at most once in any 274 // Location bucket. 
275 using LocIndices = SmallVector<LocIndex, 2>; 276 277 class VarLocBasedLDV : public LDVImpl { 278 private: 279 const TargetRegisterInfo *TRI; 280 const TargetInstrInfo *TII; 281 const TargetFrameLowering *TFI; 282 TargetPassConfig *TPC; 283 BitVector CalleeSavedRegs; 284 LexicalScopes LS; 285 VarLocSet::Allocator Alloc; 286 287 enum struct TransferKind { TransferCopy, TransferSpill, TransferRestore }; 288 289 using FragmentInfo = DIExpression::FragmentInfo; 290 using OptFragmentInfo = Optional<DIExpression::FragmentInfo>; 291 292 /// A pair of debug variable and value location. 293 struct VarLoc { 294 // The location at which a spilled variable resides. It consists of a 295 // register and an offset. 296 struct SpillLoc { 297 unsigned SpillBase; 298 StackOffset SpillOffset; 299 bool operator==(const SpillLoc &Other) const { 300 return SpillBase == Other.SpillBase && SpillOffset == Other.SpillOffset; 301 } 302 bool operator!=(const SpillLoc &Other) const { 303 return !(*this == Other); 304 } 305 }; 306 307 /// Identity of the variable at this location. 308 const DebugVariable Var; 309 310 /// The expression applied to this location. 311 const DIExpression *Expr; 312 313 /// DBG_VALUE to clone var/expr information from if this location 314 /// is moved. 315 const MachineInstr &MI; 316 317 enum class MachineLocKind { 318 InvalidKind = 0, 319 RegisterKind, 320 SpillLocKind, 321 ImmediateKind 322 }; 323 324 enum class EntryValueLocKind { 325 NonEntryValueKind = 0, 326 EntryValueKind, 327 EntryValueBackupKind, 328 EntryValueCopyBackupKind 329 } EVKind; 330 331 /// The value location. Stored separately to avoid repeatedly 332 /// extracting it from MI. 333 union MachineLocValue { 334 uint64_t RegNo; 335 SpillLoc SpillLocation; 336 uint64_t Hash; 337 int64_t Immediate; 338 const ConstantFP *FPImm; 339 const ConstantInt *CImm; 340 MachineLocValue() : Hash(0) {} 341 }; 342 343 /// A single machine location; its Kind is either a register, spill 344 /// location, or immediate value. 345 /// If the VarLoc is not a NonEntryValueKind, then it will use only a 346 /// single MachineLoc of RegisterKind. 347 struct MachineLoc { 348 MachineLocKind Kind; 349 MachineLocValue Value; 350 bool operator==(const MachineLoc &Other) const { 351 if (Kind != Other.Kind) 352 return false; 353 switch (Kind) { 354 case MachineLocKind::SpillLocKind: 355 return Value.SpillLocation == Other.Value.SpillLocation; 356 case MachineLocKind::RegisterKind: 357 case MachineLocKind::ImmediateKind: 358 return Value.Hash == Other.Value.Hash; 359 default: 360 llvm_unreachable("Invalid kind"); 361 } 362 } 363 bool operator<(const MachineLoc &Other) const { 364 switch (Kind) { 365 case MachineLocKind::SpillLocKind: 366 return std::make_tuple( 367 Kind, Value.SpillLocation.SpillBase, 368 Value.SpillLocation.SpillOffset.getFixed(), 369 Value.SpillLocation.SpillOffset.getScalable()) < 370 std::make_tuple( 371 Other.Kind, Other.Value.SpillLocation.SpillBase, 372 Other.Value.SpillLocation.SpillOffset.getFixed(), 373 Other.Value.SpillLocation.SpillOffset.getScalable()); 374 case MachineLocKind::RegisterKind: 375 case MachineLocKind::ImmediateKind: 376 return std::tie(Kind, Value.Hash) < 377 std::tie(Other.Kind, Other.Value.Hash); 378 default: 379 llvm_unreachable("Invalid kind"); 380 } 381 } 382 }; 383 384 /// The set of machine locations used to determine the variable's value, in 385 /// conjunction with Expr. Initially populated with MI's debug operands, 386 /// but may be transformed independently afterwards. 
387 SmallVector<MachineLoc, 8> Locs; 388 /// Used to map the index of each location in Locs back to the index of its 389 /// original debug operand in MI. Used when multiple location operands are 390 /// coalesced and the original MI's operands need to be accessed while 391 /// emitting a debug value. 392 SmallVector<unsigned, 8> OrigLocMap; 393 394 VarLoc(const MachineInstr &MI, LexicalScopes &LS) 395 : Var(MI.getDebugVariable(), MI.getDebugExpression(), 396 MI.getDebugLoc()->getInlinedAt()), 397 Expr(MI.getDebugExpression()), MI(MI), 398 EVKind(EntryValueLocKind::NonEntryValueKind) { 399 assert(MI.isDebugValue() && "not a DBG_VALUE"); 400 assert((MI.isDebugValueList() || MI.getNumOperands() == 4) && 401 "malformed DBG_VALUE"); 402 for (const MachineOperand &Op : MI.debug_operands()) { 403 MachineLoc ML = GetLocForOp(Op); 404 auto It = find(Locs, ML); 405 if (It == Locs.end()) { 406 Locs.push_back(ML); 407 OrigLocMap.push_back(MI.getDebugOperandIndex(&Op)); 408 } else { 409 // ML duplicates an element in Locs; replace references to Op 410 // with references to the duplicating element. 411 unsigned OpIdx = Locs.size(); 412 unsigned DuplicatingIdx = std::distance(Locs.begin(), It); 413 Expr = DIExpression::replaceArg(Expr, OpIdx, DuplicatingIdx); 414 } 415 } 416 417 // We create the debug entry values from the factory functions rather 418 // than from this ctor. 419 assert(EVKind != EntryValueLocKind::EntryValueKind && 420 !isEntryBackupLoc()); 421 } 422 423 static MachineLoc GetLocForOp(const MachineOperand &Op) { 424 MachineLocKind Kind; 425 MachineLocValue Loc; 426 if (Op.isReg()) { 427 Kind = MachineLocKind::RegisterKind; 428 Loc.RegNo = Op.getReg(); 429 } else if (Op.isImm()) { 430 Kind = MachineLocKind::ImmediateKind; 431 Loc.Immediate = Op.getImm(); 432 } else if (Op.isFPImm()) { 433 Kind = MachineLocKind::ImmediateKind; 434 Loc.FPImm = Op.getFPImm(); 435 } else if (Op.isCImm()) { 436 Kind = MachineLocKind::ImmediateKind; 437 Loc.CImm = Op.getCImm(); 438 } else 439 llvm_unreachable("Invalid Op kind for MachineLoc."); 440 return {Kind, Loc}; 441 } 442 443 /// Take the variable and machine-location in DBG_VALUE MI, and build an 444 /// entry location using the given expression. 445 static VarLoc CreateEntryLoc(const MachineInstr &MI, LexicalScopes &LS, 446 const DIExpression *EntryExpr, Register Reg) { 447 VarLoc VL(MI, LS); 448 assert(VL.Locs.size() == 1 && 449 VL.Locs[0].Kind == MachineLocKind::RegisterKind); 450 VL.EVKind = EntryValueLocKind::EntryValueKind; 451 VL.Expr = EntryExpr; 452 VL.Locs[0].Value.RegNo = Reg; 453 return VL; 454 } 455 456 /// Take the variable and machine-location from the DBG_VALUE (from the 457 /// function entry), and build an entry value backup location. The backup 458 /// location will turn into the normal location if the backup is valid at 459 /// the time of the primary location clobbering. 460 static VarLoc CreateEntryBackupLoc(const MachineInstr &MI, 461 LexicalScopes &LS, 462 const DIExpression *EntryExpr) { 463 VarLoc VL(MI, LS); 464 assert(VL.Locs.size() == 1 && 465 VL.Locs[0].Kind == MachineLocKind::RegisterKind); 466 VL.EVKind = EntryValueLocKind::EntryValueBackupKind; 467 VL.Expr = EntryExpr; 468 return VL; 469 } 470 471 /// Take the variable and machine-location from the DBG_VALUE (from the 472 /// function entry), and build a copy of an entry value backup location by 473 /// setting the register location to NewReg. 
474 static VarLoc CreateEntryCopyBackupLoc(const MachineInstr &MI, 475 LexicalScopes &LS, 476 const DIExpression *EntryExpr, 477 Register NewReg) { 478 VarLoc VL(MI, LS); 479 assert(VL.Locs.size() == 1 && 480 VL.Locs[0].Kind == MachineLocKind::RegisterKind); 481 VL.EVKind = EntryValueLocKind::EntryValueCopyBackupKind; 482 VL.Expr = EntryExpr; 483 VL.Locs[0].Value.RegNo = NewReg; 484 return VL; 485 } 486 487 /// Copy the register location in DBG_VALUE MI, updating the register to 488 /// be NewReg. 489 static VarLoc CreateCopyLoc(const VarLoc &OldVL, const MachineLoc &OldML, 490 Register NewReg) { 491 VarLoc VL = OldVL; 492 for (size_t I = 0, E = VL.Locs.size(); I < E; ++I) 493 if (VL.Locs[I] == OldML) { 494 VL.Locs[I].Kind = MachineLocKind::RegisterKind; 495 VL.Locs[I].Value.RegNo = NewReg; 496 return VL; 497 } 498 llvm_unreachable("Should have found OldML in new VarLoc."); 499 } 500 501 /// Take the variable described by DBG_VALUE* MI, and create a VarLoc 502 /// locating it in the specified spill location. 503 static VarLoc CreateSpillLoc(const VarLoc &OldVL, const MachineLoc &OldML, 504 unsigned SpillBase, StackOffset SpillOffset) { 505 VarLoc VL = OldVL; 506 for (int I = 0, E = VL.Locs.size(); I < E; ++I) 507 if (VL.Locs[I] == OldML) { 508 VL.Locs[I].Kind = MachineLocKind::SpillLocKind; 509 VL.Locs[I].Value.SpillLocation = {SpillBase, SpillOffset}; 510 return VL; 511 } 512 llvm_unreachable("Should have found OldML in new VarLoc."); 513 } 514 515 /// Create a DBG_VALUE representing this VarLoc in the given function. 516 /// Copies variable-specific information such as DILocalVariable and 517 /// inlining information from the original DBG_VALUE instruction, which may 518 /// have been several transfers ago. 519 MachineInstr *BuildDbgValue(MachineFunction &MF) const { 520 assert(!isEntryBackupLoc() && 521 "Tried to produce DBG_VALUE for backup VarLoc"); 522 const DebugLoc &DbgLoc = MI.getDebugLoc(); 523 bool Indirect = MI.isIndirectDebugValue(); 524 const auto &IID = MI.getDesc(); 525 const DILocalVariable *Var = MI.getDebugVariable(); 526 NumInserted++; 527 528 const DIExpression *DIExpr = Expr; 529 SmallVector<MachineOperand, 8> MOs; 530 for (unsigned I = 0, E = Locs.size(); I < E; ++I) { 531 MachineLocKind LocKind = Locs[I].Kind; 532 MachineLocValue Loc = Locs[I].Value; 533 const MachineOperand &Orig = MI.getDebugOperand(OrigLocMap[I]); 534 switch (LocKind) { 535 case MachineLocKind::RegisterKind: 536 // An entry value is a register location -- but with an updated 537 // expression. The register location of such DBG_VALUE is always the 538 // one from the entry DBG_VALUE, it does not matter if the entry value 539 // was copied in to another register due to some optimizations. 540 // Non-entry value register locations are like the source 541 // DBG_VALUE, but with the register number from this VarLoc. 542 MOs.push_back(MachineOperand::CreateReg( 543 EVKind == EntryValueLocKind::EntryValueKind ? Orig.getReg() 544 : Register(Loc.RegNo), 545 false)); 546 MOs.back().setIsDebug(); 547 break; 548 case MachineLocKind::SpillLocKind: { 549 // Spills are indirect DBG_VALUEs, with a base register and offset. 550 // Use the original DBG_VALUEs expression to build the spilt location 551 // on top of. FIXME: spill locations created before this pass runs 552 // are not recognized, and not handled here. 
553 unsigned Base = Loc.SpillLocation.SpillBase; 554 auto *TRI = MF.getSubtarget().getRegisterInfo(); 555 if (MI.isNonListDebugValue()) { 556 DIExpr = 557 TRI->prependOffsetExpression(DIExpr, DIExpression::ApplyOffset, 558 Loc.SpillLocation.SpillOffset); 559 Indirect = true; 560 } else { 561 SmallVector<uint64_t, 4> Ops; 562 TRI->getOffsetOpcodes(Loc.SpillLocation.SpillOffset, Ops); 563 Ops.push_back(dwarf::DW_OP_deref); 564 DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, I); 565 } 566 MOs.push_back(MachineOperand::CreateReg(Base, false)); 567 MOs.back().setIsDebug(); 568 break; 569 } 570 case MachineLocKind::ImmediateKind: { 571 MOs.push_back(Orig); 572 break; 573 } 574 case MachineLocKind::InvalidKind: 575 llvm_unreachable("Tried to produce DBG_VALUE for invalid VarLoc"); 576 } 577 } 578 return BuildMI(MF, DbgLoc, IID, Indirect, MOs, Var, DIExpr); 579 } 580 581 /// Is the Loc field a constant or constant object? 582 bool isConstant(MachineLocKind Kind) const { 583 return Kind == MachineLocKind::ImmediateKind; 584 } 585 586 /// Check if the Loc field is an entry backup location. 587 bool isEntryBackupLoc() const { 588 return EVKind == EntryValueLocKind::EntryValueBackupKind || 589 EVKind == EntryValueLocKind::EntryValueCopyBackupKind; 590 } 591 592 /// If this variable is described by register \p Reg holding the entry 593 /// value, return true. 594 bool isEntryValueBackupReg(Register Reg) const { 595 return EVKind == EntryValueLocKind::EntryValueBackupKind && usesReg(Reg); 596 } 597 598 /// If this variable is described by register \p Reg holding a copy of the 599 /// entry value, return true. 600 bool isEntryValueCopyBackupReg(Register Reg) const { 601 return EVKind == EntryValueLocKind::EntryValueCopyBackupKind && 602 usesReg(Reg); 603 } 604 605 /// If this variable is described in whole or part by \p Reg, return true. 606 bool usesReg(Register Reg) const { 607 MachineLoc RegML; 608 RegML.Kind = MachineLocKind::RegisterKind; 609 RegML.Value.RegNo = Reg; 610 return is_contained(Locs, RegML); 611 } 612 613 /// If this variable is described in whole or part by \p Reg, return true. 614 unsigned getRegIdx(Register Reg) const { 615 for (unsigned Idx = 0; Idx < Locs.size(); ++Idx) 616 if (Locs[Idx].Kind == MachineLocKind::RegisterKind && 617 Locs[Idx].Value.RegNo == Reg) 618 return Idx; 619 llvm_unreachable("Could not find given Reg in Locs"); 620 } 621 622 /// If this variable is described in whole or part by 1 or more registers, 623 /// add each of them to \p Regs and return true. 624 bool getDescribingRegs(SmallVectorImpl<uint32_t> &Regs) const { 625 bool AnyRegs = false; 626 for (auto Loc : Locs) 627 if (Loc.Kind == MachineLocKind::RegisterKind) { 628 Regs.push_back(Loc.Value.RegNo); 629 AnyRegs = true; 630 } 631 return AnyRegs; 632 } 633 634 bool containsSpillLocs() const { 635 return any_of(Locs, [](VarLoc::MachineLoc ML) { 636 return ML.Kind == VarLoc::MachineLocKind::SpillLocKind; 637 }); 638 } 639 640 /// If this variable is described in whole or part by \p SpillLocation, 641 /// return true. 642 bool usesSpillLoc(SpillLoc SpillLocation) const { 643 MachineLoc SpillML; 644 SpillML.Kind = MachineLocKind::SpillLocKind; 645 SpillML.Value.SpillLocation = SpillLocation; 646 return is_contained(Locs, SpillML); 647 } 648 649 /// If this variable is described in whole or part by \p SpillLocation, 650 /// return the index . 
651 unsigned getSpillLocIdx(SpillLoc SpillLocation) const { 652 for (unsigned Idx = 0; Idx < Locs.size(); ++Idx) 653 if (Locs[Idx].Kind == MachineLocKind::SpillLocKind && 654 Locs[Idx].Value.SpillLocation == SpillLocation) 655 return Idx; 656 llvm_unreachable("Could not find given SpillLoc in Locs"); 657 } 658 659 /// Determine whether the lexical scope of this value's debug location 660 /// dominates MBB. 661 bool dominates(LexicalScopes &LS, MachineBasicBlock &MBB) const { 662 return LS.dominates(MI.getDebugLoc().get(), &MBB); 663 } 664 665 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 666 // TRI can be null. 667 void dump(const TargetRegisterInfo *TRI, raw_ostream &Out = dbgs()) const { 668 Out << "VarLoc("; 669 for (const MachineLoc &MLoc : Locs) { 670 if (Locs.begin() != &MLoc) 671 Out << ", "; 672 switch (MLoc.Kind) { 673 case MachineLocKind::RegisterKind: 674 Out << printReg(MLoc.Value.RegNo, TRI); 675 break; 676 case MachineLocKind::SpillLocKind: 677 Out << printReg(MLoc.Value.SpillLocation.SpillBase, TRI); 678 Out << "[" << MLoc.Value.SpillLocation.SpillOffset.getFixed() << " + " 679 << MLoc.Value.SpillLocation.SpillOffset.getScalable() 680 << "x vscale" 681 << "]"; 682 break; 683 case MachineLocKind::ImmediateKind: 684 Out << MLoc.Value.Immediate; 685 break; 686 case MachineLocKind::InvalidKind: 687 llvm_unreachable("Invalid VarLoc in dump method"); 688 } 689 } 690 691 Out << ", \"" << Var.getVariable()->getName() << "\", " << *Expr << ", "; 692 if (Var.getInlinedAt()) 693 Out << "!" << Var.getInlinedAt()->getMetadataID() << ")\n"; 694 else 695 Out << "(null))"; 696 697 if (isEntryBackupLoc()) 698 Out << " (backup loc)\n"; 699 else 700 Out << "\n"; 701 } 702 #endif 703 704 bool operator==(const VarLoc &Other) const { 705 return std::tie(EVKind, Var, Expr, Locs) == 706 std::tie(Other.EVKind, Other.Var, Other.Expr, Other.Locs); 707 } 708 709 /// This operator guarantees that VarLocs are sorted by Variable first. 710 bool operator<(const VarLoc &Other) const { 711 return std::tie(Var, EVKind, Locs, Expr) < 712 std::tie(Other.Var, Other.EVKind, Other.Locs, Other.Expr); 713 } 714 }; 715 716 #ifndef NDEBUG 717 using VarVec = SmallVector<VarLoc, 32>; 718 #endif 719 720 /// VarLocMap is used for two things: 721 /// 1) Assigning LocIndices to a VarLoc. The LocIndices can be used to 722 /// virtually insert a VarLoc into a VarLocSet. 723 /// 2) Given a LocIndex, look up the unique associated VarLoc. 724 class VarLocMap { 725 /// Map a VarLoc to an index within the vector reserved for its location 726 /// within Loc2Vars. 727 std::map<VarLoc, LocIndices> Var2Indices; 728 729 /// Map a location to a vector which holds VarLocs which live in that 730 /// location. 731 SmallDenseMap<LocIndex::u32_location_t, std::vector<VarLoc>> Loc2Vars; 732 733 public: 734 /// Retrieve LocIndices for \p VL. 735 LocIndices insert(const VarLoc &VL) { 736 LocIndices &Indices = Var2Indices[VL]; 737 // If Indices is not empty, VL is already in the map. 738 if (!Indices.empty()) 739 return Indices; 740 SmallVector<LocIndex::u32_location_t, 4> Locations; 741 // LocIndices are determined by EVKind and MLs; each Register has a 742 // unique location, while all SpillLocs use a single bucket, and any EV 743 // VarLocs use only the Backup bucket or none at all (except the 744 // compulsory entry at the universal location index). LocIndices will 745 // always have an index at the universal location index as the last index. 
746 if (VL.EVKind == VarLoc::EntryValueLocKind::NonEntryValueKind) { 747 VL.getDescribingRegs(Locations); 748 assert(all_of(Locations, 749 [](auto RegNo) { 750 return RegNo < LocIndex::kFirstInvalidRegLocation; 751 }) && 752 "Physreg out of range?"); 753 if (VL.containsSpillLocs()) { 754 LocIndex::u32_location_t Loc = LocIndex::kSpillLocation; 755 Locations.push_back(Loc); 756 } 757 } else if (VL.EVKind != VarLoc::EntryValueLocKind::EntryValueKind) { 758 LocIndex::u32_location_t Loc = LocIndex::kEntryValueBackupLocation; 759 Locations.push_back(Loc); 760 } 761 Locations.push_back(LocIndex::kUniversalLocation); 762 for (LocIndex::u32_location_t Location : Locations) { 763 auto &Vars = Loc2Vars[Location]; 764 Indices.push_back( 765 {Location, static_cast<LocIndex::u32_index_t>(Vars.size())}); 766 Vars.push_back(VL); 767 } 768 return Indices; 769 } 770 771 LocIndices getAllIndices(const VarLoc &VL) const { 772 auto IndIt = Var2Indices.find(VL); 773 assert(IndIt != Var2Indices.end() && "VarLoc not tracked"); 774 return IndIt->second; 775 } 776 777 /// Retrieve the unique VarLoc associated with \p ID. 778 const VarLoc &operator[](LocIndex ID) const { 779 auto LocIt = Loc2Vars.find(ID.Location); 780 assert(LocIt != Loc2Vars.end() && "Location not tracked"); 781 return LocIt->second[ID.Index]; 782 } 783 }; 784 785 using VarLocInMBB = 786 SmallDenseMap<const MachineBasicBlock *, std::unique_ptr<VarLocSet>>; 787 struct TransferDebugPair { 788 MachineInstr *TransferInst; ///< Instruction where this transfer occurs. 789 LocIndex LocationID; ///< Location number for the transfer dest. 790 }; 791 using TransferMap = SmallVector<TransferDebugPair, 4>; 792 793 // Types for recording sets of variable fragments that overlap. For a given 794 // local variable, we record all other fragments of that variable that could 795 // overlap it, to reduce search time. 796 using FragmentOfVar = 797 std::pair<const DILocalVariable *, DIExpression::FragmentInfo>; 798 using OverlapMap = 799 DenseMap<FragmentOfVar, SmallVector<DIExpression::FragmentInfo, 1>>; 800 801 // Helper while building OverlapMap, a map of all fragments seen for a given 802 // DILocalVariable. 803 using VarToFragments = 804 DenseMap<const DILocalVariable *, SmallSet<FragmentInfo, 4>>; 805 806 /// Collects all VarLocs from \p CollectFrom. Each unique VarLoc is added 807 /// to \p Collected once, in order of insertion into \p VarLocIDs. 808 static void collectAllVarLocs(SmallVectorImpl<VarLoc> &Collected, 809 const VarLocSet &CollectFrom, 810 const VarLocMap &VarLocIDs); 811 812 /// Get the registers which are used by VarLocs of kind RegisterKind tracked 813 /// by \p CollectFrom. 814 void getUsedRegs(const VarLocSet &CollectFrom, 815 SmallVectorImpl<Register> &UsedRegs) const; 816 817 /// This holds the working set of currently open ranges. For fast 818 /// access, this is done both as a set of VarLocIDs, and a map of 819 /// DebugVariable to recent VarLocID. Note that a DBG_VALUE ends all 820 /// previous open ranges for the same variable. In addition, we keep 821 /// two different maps (Vars/EntryValuesBackupVars), so erase/insert 822 /// methods act differently depending on whether a VarLoc is primary 823 /// location or backup one. In the case the VarLoc is backup location 824 /// we will erase/insert from the EntryValuesBackupVars map, otherwise 825 /// we perform the operation on the Vars. 826 class OpenRangesSet { 827 VarLocSet::Allocator &Alloc; 828 VarLocSet VarLocs; 829 // Map the DebugVariable to recent primary location ID. 
830 SmallDenseMap<DebugVariable, LocIndices, 8> Vars; 831 // Map the DebugVariable to recent backup location ID. 832 SmallDenseMap<DebugVariable, LocIndices, 8> EntryValuesBackupVars; 833 OverlapMap &OverlappingFragments; 834 835 public: 836 OpenRangesSet(VarLocSet::Allocator &Alloc, OverlapMap &_OLapMap) 837 : Alloc(Alloc), VarLocs(Alloc), OverlappingFragments(_OLapMap) {} 838 839 const VarLocSet &getVarLocs() const { return VarLocs; } 840 841 // Fetches all VarLocs in \p VarLocIDs and inserts them into \p Collected. 842 // This method is needed to get every VarLoc once, as each VarLoc may have 843 // multiple indices in a VarLocMap (corresponding to each applicable 844 // location), but all VarLocs appear exactly once at the universal location 845 // index. 846 void getUniqueVarLocs(SmallVectorImpl<VarLoc> &Collected, 847 const VarLocMap &VarLocIDs) const { 848 collectAllVarLocs(Collected, VarLocs, VarLocIDs); 849 } 850 851 /// Terminate all open ranges for VL.Var by removing it from the set. 852 void erase(const VarLoc &VL); 853 854 /// Terminate all open ranges listed as indices in \c KillSet with 855 /// \c Location by removing them from the set. 856 void erase(const VarLocsInRange &KillSet, const VarLocMap &VarLocIDs, 857 LocIndex::u32_location_t Location); 858 859 /// Insert a new range into the set. 860 void insert(LocIndices VarLocIDs, const VarLoc &VL); 861 862 /// Insert a set of ranges. 863 void insertFromLocSet(const VarLocSet &ToLoad, const VarLocMap &Map); 864 865 llvm::Optional<LocIndices> getEntryValueBackup(DebugVariable Var); 866 867 /// Empty the set. 868 void clear() { 869 VarLocs.clear(); 870 Vars.clear(); 871 EntryValuesBackupVars.clear(); 872 } 873 874 /// Return whether the set is empty or not. 875 bool empty() const { 876 assert(Vars.empty() == EntryValuesBackupVars.empty() && 877 Vars.empty() == VarLocs.empty() && 878 "open ranges are inconsistent"); 879 return VarLocs.empty(); 880 } 881 882 /// Get an empty range of VarLoc IDs. 883 auto getEmptyVarLocRange() const { 884 return iterator_range<VarLocSet::const_iterator>(getVarLocs().end(), 885 getVarLocs().end()); 886 } 887 888 /// Get all set IDs for VarLocs with MLs of kind RegisterKind in \p Reg. 889 auto getRegisterVarLocs(Register Reg) const { 890 return LocIndex::indexRangeForLocation(getVarLocs(), Reg); 891 } 892 893 /// Get all set IDs for VarLocs with MLs of kind SpillLocKind. 894 auto getSpillVarLocs() const { 895 return LocIndex::indexRangeForLocation(getVarLocs(), 896 LocIndex::kSpillLocation); 897 } 898 899 /// Get all set IDs for VarLocs of EVKind EntryValueBackupKind or 900 /// EntryValueCopyBackupKind. 901 auto getEntryValueBackupVarLocs() const { 902 return LocIndex::indexRangeForLocation( 903 getVarLocs(), LocIndex::kEntryValueBackupLocation); 904 } 905 }; 906 907 /// Collect all VarLoc IDs from \p CollectFrom for VarLocs with MLs of kind 908 /// RegisterKind which are located in any reg in \p Regs. The IDs for each 909 /// VarLoc correspond to entries in the universal location bucket, which every 910 /// VarLoc has exactly 1 entry for. Insert collected IDs into \p Collected. 
911 static void collectIDsForRegs(VarLocsInRange &Collected, 912 const DefinedRegsSet &Regs, 913 const VarLocSet &CollectFrom, 914 const VarLocMap &VarLocIDs); 915 916 VarLocSet &getVarLocsInMBB(const MachineBasicBlock *MBB, VarLocInMBB &Locs) { 917 std::unique_ptr<VarLocSet> &VLS = Locs[MBB]; 918 if (!VLS) 919 VLS = std::make_unique<VarLocSet>(Alloc); 920 return *VLS.get(); 921 } 922 923 const VarLocSet &getVarLocsInMBB(const MachineBasicBlock *MBB, 924 const VarLocInMBB &Locs) const { 925 auto It = Locs.find(MBB); 926 assert(It != Locs.end() && "MBB not in map"); 927 return *It->second.get(); 928 } 929 930 /// Tests whether this instruction is a spill to a stack location. 931 bool isSpillInstruction(const MachineInstr &MI, MachineFunction *MF); 932 933 /// Decide if @MI is a spill instruction and return true if it is. We use 2 934 /// criteria to make this decision: 935 /// - Is this instruction a store to a spill slot? 936 /// - Is there a register operand that is both used and killed? 937 /// TODO: Store optimization can fold spills into other stores (including 938 /// other spills). We do not handle this yet (more than one memory operand). 939 bool isLocationSpill(const MachineInstr &MI, MachineFunction *MF, 940 Register &Reg); 941 942 /// Returns true if the given machine instruction is a debug value which we 943 /// can emit entry values for. 944 /// 945 /// Currently, we generate debug entry values only for parameters that are 946 /// unmodified throughout the function and located in a register. 947 bool isEntryValueCandidate(const MachineInstr &MI, 948 const DefinedRegsSet &Regs) const; 949 950 /// If a given instruction is identified as a spill, return the spill location 951 /// and set \p Reg to the spilled register. 952 Optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI, 953 MachineFunction *MF, 954 Register &Reg); 955 /// Given a spill instruction, extract the register and offset used to 956 /// address the spill location in a target independent way. 
957 VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI); 958 void insertTransferDebugPair(MachineInstr &MI, OpenRangesSet &OpenRanges, 959 TransferMap &Transfers, VarLocMap &VarLocIDs, 960 LocIndex OldVarID, TransferKind Kind, 961 const VarLoc::MachineLoc &OldLoc, 962 Register NewReg = Register()); 963 964 void transferDebugValue(const MachineInstr &MI, OpenRangesSet &OpenRanges, 965 VarLocMap &VarLocIDs); 966 void transferSpillOrRestoreInst(MachineInstr &MI, OpenRangesSet &OpenRanges, 967 VarLocMap &VarLocIDs, TransferMap &Transfers); 968 bool removeEntryValue(const MachineInstr &MI, OpenRangesSet &OpenRanges, 969 VarLocMap &VarLocIDs, const VarLoc &EntryVL); 970 void emitEntryValues(MachineInstr &MI, OpenRangesSet &OpenRanges, 971 VarLocMap &VarLocIDs, TransferMap &Transfers, 972 VarLocsInRange &KillSet); 973 void recordEntryValue(const MachineInstr &MI, 974 const DefinedRegsSet &DefinedRegs, 975 OpenRangesSet &OpenRanges, VarLocMap &VarLocIDs); 976 void transferRegisterCopy(MachineInstr &MI, OpenRangesSet &OpenRanges, 977 VarLocMap &VarLocIDs, TransferMap &Transfers); 978 void transferRegisterDef(MachineInstr &MI, OpenRangesSet &OpenRanges, 979 VarLocMap &VarLocIDs, TransferMap &Transfers); 980 bool transferTerminator(MachineBasicBlock *MBB, OpenRangesSet &OpenRanges, 981 VarLocInMBB &OutLocs, const VarLocMap &VarLocIDs); 982 983 void process(MachineInstr &MI, OpenRangesSet &OpenRanges, 984 VarLocMap &VarLocIDs, TransferMap &Transfers); 985 986 void accumulateFragmentMap(MachineInstr &MI, VarToFragments &SeenFragments, 987 OverlapMap &OLapMap); 988 989 bool join(MachineBasicBlock &MBB, VarLocInMBB &OutLocs, VarLocInMBB &InLocs, 990 const VarLocMap &VarLocIDs, 991 SmallPtrSet<const MachineBasicBlock *, 16> &Visited, 992 SmallPtrSetImpl<const MachineBasicBlock *> &ArtificialBlocks); 993 994 /// Create DBG_VALUE insts for inlocs that have been propagated but 995 /// had their instruction creation deferred. 996 void flushPendingLocs(VarLocInMBB &PendingInLocs, VarLocMap &VarLocIDs); 997 998 bool ExtendRanges(MachineFunction &MF, TargetPassConfig *TPC, 999 unsigned InputBBLimit, unsigned InputDbgValLimit) override; 1000 1001 public: 1002 /// Default construct and initialize the pass. 1003 VarLocBasedLDV(); 1004 1005 ~VarLocBasedLDV(); 1006 1007 /// Print to ostream with a message. 1008 void printVarLocInMBB(const MachineFunction &MF, const VarLocInMBB &V, 1009 const VarLocMap &VarLocIDs, const char *msg, 1010 raw_ostream &Out) const; 1011 }; 1012 1013 } // end anonymous namespace 1014 1015 //===----------------------------------------------------------------------===// 1016 // Implementation 1017 //===----------------------------------------------------------------------===// 1018 1019 VarLocBasedLDV::VarLocBasedLDV() { } 1020 1021 VarLocBasedLDV::~VarLocBasedLDV() { } 1022 1023 /// Erase a variable from the set of open ranges, and additionally erase any 1024 /// fragments that may overlap it. If the VarLoc is a backup location, erase 1025 /// the variable from the EntryValuesBackupVars set, indicating we should stop 1026 /// tracking its backup entry location. Otherwise, if the VarLoc is primary 1027 /// location, erase the variable from the Vars set. 1028 void VarLocBasedLDV::OpenRangesSet::erase(const VarLoc &VL) { 1029 // Erasure helper. 1030 auto DoErase = [VL, this](DebugVariable VarToErase) { 1031 auto *EraseFrom = VL.isEntryBackupLoc() ? 
&EntryValuesBackupVars : &Vars; 1032 auto It = EraseFrom->find(VarToErase); 1033 if (It != EraseFrom->end()) { 1034 LocIndices IDs = It->second; 1035 for (LocIndex ID : IDs) 1036 VarLocs.reset(ID.getAsRawInteger()); 1037 EraseFrom->erase(It); 1038 } 1039 }; 1040 1041 DebugVariable Var = VL.Var; 1042 1043 // Erase the variable/fragment that ends here. 1044 DoErase(Var); 1045 1046 // Extract the fragment. Interpret an empty fragment as one that covers all 1047 // possible bits. 1048 FragmentInfo ThisFragment = Var.getFragmentOrDefault(); 1049 1050 // There may be fragments that overlap the designated fragment. Look them up 1051 // in the pre-computed overlap map, and erase them too. 1052 auto MapIt = OverlappingFragments.find({Var.getVariable(), ThisFragment}); 1053 if (MapIt != OverlappingFragments.end()) { 1054 for (auto Fragment : MapIt->second) { 1055 VarLocBasedLDV::OptFragmentInfo FragmentHolder; 1056 if (!DebugVariable::isDefaultFragment(Fragment)) 1057 FragmentHolder = VarLocBasedLDV::OptFragmentInfo(Fragment); 1058 DoErase({Var.getVariable(), FragmentHolder, Var.getInlinedAt()}); 1059 } 1060 } 1061 } 1062 1063 void VarLocBasedLDV::OpenRangesSet::erase(const VarLocsInRange &KillSet, 1064 const VarLocMap &VarLocIDs, 1065 LocIndex::u32_location_t Location) { 1066 VarLocSet RemoveSet(Alloc); 1067 for (LocIndex::u32_index_t ID : KillSet) { 1068 const VarLoc &VL = VarLocIDs[LocIndex(Location, ID)]; 1069 auto *EraseFrom = VL.isEntryBackupLoc() ? &EntryValuesBackupVars : &Vars; 1070 EraseFrom->erase(VL.Var); 1071 LocIndices VLI = VarLocIDs.getAllIndices(VL); 1072 for (LocIndex ID : VLI) 1073 RemoveSet.set(ID.getAsRawInteger()); 1074 } 1075 VarLocs.intersectWithComplement(RemoveSet); 1076 } 1077 1078 void VarLocBasedLDV::OpenRangesSet::insertFromLocSet(const VarLocSet &ToLoad, 1079 const VarLocMap &Map) { 1080 VarLocsInRange UniqueVarLocIDs; 1081 DefinedRegsSet Regs; 1082 Regs.insert(LocIndex::kUniversalLocation); 1083 collectIDsForRegs(UniqueVarLocIDs, Regs, ToLoad, Map); 1084 for (uint64_t ID : UniqueVarLocIDs) { 1085 LocIndex Idx = LocIndex::fromRawInteger(ID); 1086 const VarLoc &VarL = Map[Idx]; 1087 const LocIndices Indices = Map.getAllIndices(VarL); 1088 insert(Indices, VarL); 1089 } 1090 } 1091 1092 void VarLocBasedLDV::OpenRangesSet::insert(LocIndices VarLocIDs, 1093 const VarLoc &VL) { 1094 auto *InsertInto = VL.isEntryBackupLoc() ? &EntryValuesBackupVars : &Vars; 1095 for (LocIndex ID : VarLocIDs) 1096 VarLocs.set(ID.getAsRawInteger()); 1097 InsertInto->insert({VL.Var, VarLocIDs}); 1098 } 1099 1100 /// Return the Loc ID of an entry value backup location, if it exists for the 1101 /// variable. 
1102 llvm::Optional<LocIndices> 1103 VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) { 1104 auto It = EntryValuesBackupVars.find(Var); 1105 if (It != EntryValuesBackupVars.end()) 1106 return It->second; 1107 1108 return llvm::None; 1109 } 1110 1111 void VarLocBasedLDV::collectIDsForRegs(VarLocsInRange &Collected, 1112 const DefinedRegsSet &Regs, 1113 const VarLocSet &CollectFrom, 1114 const VarLocMap &VarLocIDs) { 1115 assert(!Regs.empty() && "Nothing to collect"); 1116 SmallVector<Register, 32> SortedRegs; 1117 append_range(SortedRegs, Regs); 1118 array_pod_sort(SortedRegs.begin(), SortedRegs.end()); 1119 auto It = CollectFrom.find(LocIndex::rawIndexForReg(SortedRegs.front())); 1120 auto End = CollectFrom.end(); 1121 for (Register Reg : SortedRegs) { 1122 // The half-open interval [FirstIndexForReg, FirstInvalidIndex) contains 1123 // all possible VarLoc IDs for VarLocs with MLs of kind RegisterKind which 1124 // live in Reg. 1125 uint64_t FirstIndexForReg = LocIndex::rawIndexForReg(Reg); 1126 uint64_t FirstInvalidIndex = LocIndex::rawIndexForReg(Reg + 1); 1127 It.advanceToLowerBound(FirstIndexForReg); 1128 1129 // Iterate through that half-open interval and collect all the set IDs. 1130 for (; It != End && *It < FirstInvalidIndex; ++It) { 1131 LocIndex ItIdx = LocIndex::fromRawInteger(*It); 1132 const VarLoc &VL = VarLocIDs[ItIdx]; 1133 LocIndices LI = VarLocIDs.getAllIndices(VL); 1134 // For now, the back index is always the universal location index. 1135 assert(LI.back().Location == LocIndex::kUniversalLocation && 1136 "Unexpected order of LocIndices for VarLoc; was it inserted into " 1137 "the VarLocMap correctly?"); 1138 Collected.insert(LI.back().Index); 1139 } 1140 1141 if (It == End) 1142 return; 1143 } 1144 } 1145 1146 void VarLocBasedLDV::getUsedRegs(const VarLocSet &CollectFrom, 1147 SmallVectorImpl<Register> &UsedRegs) const { 1148 // All register-based VarLocs are assigned indices greater than or equal to 1149 // FirstRegIndex. 1150 uint64_t FirstRegIndex = 1151 LocIndex::rawIndexForReg(LocIndex::kFirstRegLocation); 1152 uint64_t FirstInvalidIndex = 1153 LocIndex::rawIndexForReg(LocIndex::kFirstInvalidRegLocation); 1154 for (auto It = CollectFrom.find(FirstRegIndex), 1155 End = CollectFrom.find(FirstInvalidIndex); 1156 It != End;) { 1157 // We found a VarLoc ID for a VarLoc that lives in a register. Figure out 1158 // which register and add it to UsedRegs. 1159 uint32_t FoundReg = LocIndex::fromRawInteger(*It).Location; 1160 assert((UsedRegs.empty() || FoundReg != UsedRegs.back()) && 1161 "Duplicate used reg"); 1162 UsedRegs.push_back(FoundReg); 1163 1164 // Skip to the next /set/ register. Note that this finds a lower bound, so 1165 // even if there aren't any VarLocs living in `FoundReg+1`, we're still 1166 // guaranteed to move on to the next register (or to end()). 
1167 uint64_t NextRegIndex = LocIndex::rawIndexForReg(FoundReg + 1); 1168 It.advanceToLowerBound(NextRegIndex); 1169 } 1170 } 1171 1172 //===----------------------------------------------------------------------===// 1173 // Debug Range Extension Implementation 1174 //===----------------------------------------------------------------------===// 1175 1176 #ifndef NDEBUG 1177 void VarLocBasedLDV::printVarLocInMBB(const MachineFunction &MF, 1178 const VarLocInMBB &V, 1179 const VarLocMap &VarLocIDs, 1180 const char *msg, 1181 raw_ostream &Out) const { 1182 Out << '\n' << msg << '\n'; 1183 for (const MachineBasicBlock &BB : MF) { 1184 if (!V.count(&BB)) 1185 continue; 1186 const VarLocSet &L = getVarLocsInMBB(&BB, V); 1187 if (L.empty()) 1188 continue; 1189 SmallVector<VarLoc, 32> VarLocs; 1190 collectAllVarLocs(VarLocs, L, VarLocIDs); 1191 Out << "MBB: " << BB.getNumber() << ":\n"; 1192 for (const VarLoc &VL : VarLocs) { 1193 Out << " Var: " << VL.Var.getVariable()->getName(); 1194 Out << " MI: "; 1195 VL.dump(TRI, Out); 1196 } 1197 } 1198 Out << "\n"; 1199 } 1200 #endif 1201 1202 VarLocBasedLDV::VarLoc::SpillLoc 1203 VarLocBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { 1204 assert(MI.hasOneMemOperand() && 1205 "Spill instruction does not have exactly one memory operand?"); 1206 auto MMOI = MI.memoperands_begin(); 1207 const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); 1208 assert(PVal->kind() == PseudoSourceValue::FixedStack && 1209 "Inconsistent memory operand in spill instruction"); 1210 int FI = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex(); 1211 const MachineBasicBlock *MBB = MI.getParent(); 1212 Register Reg; 1213 StackOffset Offset = TFI->getFrameIndexReference(*MBB->getParent(), FI, Reg); 1214 return {Reg, Offset}; 1215 } 1216 1217 /// Try to salvage the debug entry value if we encounter a new debug value 1218 /// describing the same parameter, otherwise stop tracking the value. Return 1219 /// true if we should stop tracking the entry value, otherwise return false. 1220 bool VarLocBasedLDV::removeEntryValue(const MachineInstr &MI, 1221 OpenRangesSet &OpenRanges, 1222 VarLocMap &VarLocIDs, 1223 const VarLoc &EntryVL) { 1224 // Skip the DBG_VALUE which is the debug entry value itself. 1225 if (MI.isIdenticalTo(EntryVL.MI)) 1226 return false; 1227 1228 // If the parameter's location is not register location, we can not track 1229 // the entry value any more. In addition, if the debug expression from the 1230 // DBG_VALUE is not empty, we can assume the parameter's value has changed 1231 // indicating that we should stop tracking its entry value as well. 1232 if (!MI.getDebugOperand(0).isReg() || 1233 MI.getDebugExpression()->getNumElements() != 0) 1234 return true; 1235 1236 // If the DBG_VALUE comes from a copy instruction that copies the entry value, 1237 // it means the parameter's value has not changed and we should be able to use 1238 // its entry value. 1239 Register Reg = MI.getDebugOperand(0).getReg(); 1240 auto I = std::next(MI.getReverseIterator()); 1241 const MachineOperand *SrcRegOp, *DestRegOp; 1242 if (I != MI.getParent()->rend()) { 1243 1244 // TODO: Try to keep tracking of an entry value if we encounter a propagated 1245 // DBG_VALUE describing the copy of the entry value. (Propagated entry value 1246 // does not indicate the parameter modification.) 
1247 auto DestSrc = TII->isCopyInstr(*I); 1248 if (!DestSrc) 1249 return true; 1250 1251 SrcRegOp = DestSrc->Source; 1252 DestRegOp = DestSrc->Destination; 1253 if (Reg != DestRegOp->getReg()) 1254 return true; 1255 1256 for (uint64_t ID : OpenRanges.getEntryValueBackupVarLocs()) { 1257 const VarLoc &VL = VarLocIDs[LocIndex::fromRawInteger(ID)]; 1258 if (VL.isEntryValueCopyBackupReg(Reg) && 1259 // Entry Values should not be variadic. 1260 VL.MI.getDebugOperand(0).getReg() == SrcRegOp->getReg()) 1261 return false; 1262 } 1263 } 1264 1265 return true; 1266 } 1267 1268 /// End all previous ranges related to @MI and start a new range from @MI 1269 /// if it is a DBG_VALUE instr. 1270 void VarLocBasedLDV::transferDebugValue(const MachineInstr &MI, 1271 OpenRangesSet &OpenRanges, 1272 VarLocMap &VarLocIDs) { 1273 if (!MI.isDebugValue()) 1274 return; 1275 const DILocalVariable *Var = MI.getDebugVariable(); 1276 const DIExpression *Expr = MI.getDebugExpression(); 1277 const DILocation *DebugLoc = MI.getDebugLoc(); 1278 const DILocation *InlinedAt = DebugLoc->getInlinedAt(); 1279 assert(Var->isValidLocationForIntrinsic(DebugLoc) && 1280 "Expected inlined-at fields to agree"); 1281 1282 DebugVariable V(Var, Expr, InlinedAt); 1283 1284 // Check if this DBG_VALUE indicates a parameter's value changing. 1285 // If that is the case, we should stop tracking its entry value. 1286 auto EntryValBackupID = OpenRanges.getEntryValueBackup(V); 1287 if (Var->isParameter() && EntryValBackupID) { 1288 const VarLoc &EntryVL = VarLocIDs[EntryValBackupID->back()]; 1289 if (removeEntryValue(MI, OpenRanges, VarLocIDs, EntryVL)) { 1290 LLVM_DEBUG(dbgs() << "Deleting a DBG entry value because of: "; 1291 MI.print(dbgs(), /*IsStandalone*/ false, 1292 /*SkipOpers*/ false, /*SkipDebugLoc*/ false, 1293 /*AddNewLine*/ true, TII)); 1294 OpenRanges.erase(EntryVL); 1295 } 1296 } 1297 1298 if (all_of(MI.debug_operands(), [](const MachineOperand &MO) { 1299 return (MO.isReg() && MO.getReg()) || MO.isImm() || MO.isFPImm() || 1300 MO.isCImm(); 1301 })) { 1302 // Use normal VarLoc constructor for registers and immediates. 1303 VarLoc VL(MI, LS); 1304 // End all previous ranges of VL.Var. 1305 OpenRanges.erase(VL); 1306 1307 LocIndices IDs = VarLocIDs.insert(VL); 1308 // Add the VarLoc to OpenRanges from this DBG_VALUE. 1309 OpenRanges.insert(IDs, VL); 1310 } else if (MI.memoperands().size() > 0) { 1311 llvm_unreachable("DBG_VALUE with mem operand encountered after regalloc?"); 1312 } else { 1313 // This must be an undefined location. If it has an open range, erase it. 1314 assert(MI.isUndefDebugValue() && 1315 "Unexpected non-undef DBG_VALUE encountered"); 1316 VarLoc VL(MI, LS); 1317 OpenRanges.erase(VL); 1318 } 1319 } 1320 1321 // This should be removed later, doesn't fit the new design. 1322 void VarLocBasedLDV::collectAllVarLocs(SmallVectorImpl<VarLoc> &Collected, 1323 const VarLocSet &CollectFrom, 1324 const VarLocMap &VarLocIDs) { 1325 // The half-open interval [FirstIndexForReg, FirstInvalidIndex) contains all 1326 // possible VarLoc IDs for VarLocs with MLs of kind RegisterKind which live 1327 // in Reg. 1328 uint64_t FirstIndex = LocIndex::rawIndexForReg(LocIndex::kUniversalLocation); 1329 uint64_t FirstInvalidIndex = 1330 LocIndex::rawIndexForReg(LocIndex::kUniversalLocation + 1); 1331 // Iterate through that half-open interval and collect all the set IDs. 
1332 for (auto It = CollectFrom.find(FirstIndex), End = CollectFrom.end(); 1333 It != End && *It < FirstInvalidIndex; ++It) { 1334 LocIndex RegIdx = LocIndex::fromRawInteger(*It); 1335 Collected.push_back(VarLocIDs[RegIdx]); 1336 } 1337 } 1338 1339 /// Turn the entry value backup locations into primary locations. 1340 void VarLocBasedLDV::emitEntryValues(MachineInstr &MI, 1341 OpenRangesSet &OpenRanges, 1342 VarLocMap &VarLocIDs, 1343 TransferMap &Transfers, 1344 VarLocsInRange &KillSet) { 1345 // Do not insert entry value locations after a terminator. 1346 if (MI.isTerminator()) 1347 return; 1348 1349 for (uint32_t ID : KillSet) { 1350 // The KillSet IDs are indices for the universal location bucket. 1351 LocIndex Idx = LocIndex(LocIndex::kUniversalLocation, ID); 1352 const VarLoc &VL = VarLocIDs[Idx]; 1353 if (!VL.Var.getVariable()->isParameter()) 1354 continue; 1355 1356 auto DebugVar = VL.Var; 1357 Optional<LocIndices> EntryValBackupIDs = 1358 OpenRanges.getEntryValueBackup(DebugVar); 1359 1360 // If the parameter has the entry value backup, it means we should 1361 // be able to use its entry value. 1362 if (!EntryValBackupIDs) 1363 continue; 1364 1365 const VarLoc &EntryVL = VarLocIDs[EntryValBackupIDs->back()]; 1366 VarLoc EntryLoc = VarLoc::CreateEntryLoc(EntryVL.MI, LS, EntryVL.Expr, 1367 EntryVL.Locs[0].Value.RegNo); 1368 LocIndices EntryValueIDs = VarLocIDs.insert(EntryLoc); 1369 Transfers.push_back({&MI, EntryValueIDs.back()}); 1370 OpenRanges.insert(EntryValueIDs, EntryLoc); 1371 } 1372 } 1373 1374 /// Create new TransferDebugPair and insert it in \p Transfers. The VarLoc 1375 /// with \p OldVarID should be deleted form \p OpenRanges and replaced with 1376 /// new VarLoc. If \p NewReg is different than default zero value then the 1377 /// new location will be register location created by the copy like instruction, 1378 /// otherwise it is variable's location on the stack. 1379 void VarLocBasedLDV::insertTransferDebugPair( 1380 MachineInstr &MI, OpenRangesSet &OpenRanges, TransferMap &Transfers, 1381 VarLocMap &VarLocIDs, LocIndex OldVarID, TransferKind Kind, 1382 const VarLoc::MachineLoc &OldLoc, Register NewReg) { 1383 const VarLoc &OldVarLoc = VarLocIDs[OldVarID]; 1384 1385 auto ProcessVarLoc = [&MI, &OpenRanges, &Transfers, &VarLocIDs](VarLoc &VL) { 1386 LocIndices LocIds = VarLocIDs.insert(VL); 1387 1388 // Close this variable's previous location range. 1389 OpenRanges.erase(VL); 1390 1391 // Record the new location as an open range, and a postponed transfer 1392 // inserting a DBG_VALUE for this location. 1393 OpenRanges.insert(LocIds, VL); 1394 assert(!MI.isTerminator() && "Cannot insert DBG_VALUE after terminator"); 1395 TransferDebugPair MIP = {&MI, LocIds.back()}; 1396 Transfers.push_back(MIP); 1397 }; 1398 1399 // End all previous ranges of VL.Var. 1400 OpenRanges.erase(VarLocIDs[OldVarID]); 1401 switch (Kind) { 1402 case TransferKind::TransferCopy: { 1403 assert(NewReg && 1404 "No register supplied when handling a copy of a debug value"); 1405 // Create a DBG_VALUE instruction to describe the Var in its new 1406 // register location. 1407 VarLoc VL = VarLoc::CreateCopyLoc(OldVarLoc, OldLoc, NewReg); 1408 ProcessVarLoc(VL); 1409 LLVM_DEBUG({ 1410 dbgs() << "Creating VarLoc for register copy:"; 1411 VL.dump(TRI); 1412 }); 1413 return; 1414 } 1415 case TransferKind::TransferSpill: { 1416 // Create a DBG_VALUE instruction to describe the Var in its spilled 1417 // location. 
    VarLoc::SpillLoc SpillLocation = extractSpillBaseRegAndOffset(MI);
    VarLoc VL = VarLoc::CreateSpillLoc(
        OldVarLoc, OldLoc, SpillLocation.SpillBase, SpillLocation.SpillOffset);
    ProcessVarLoc(VL);
    LLVM_DEBUG({
      dbgs() << "Creating VarLoc for spill:";
      VL.dump(TRI);
    });
    return;
  }
  case TransferKind::TransferRestore: {
    assert(NewReg &&
           "No register supplied when handling a restore of a debug value");
    // DebugInstr refers to the pre-spill location, therefore we can reuse
    // its expression.
    VarLoc VL = VarLoc::CreateCopyLoc(OldVarLoc, OldLoc, NewReg);
    ProcessVarLoc(VL);
    LLVM_DEBUG({
      dbgs() << "Creating VarLoc for restore:";
      VL.dump(TRI);
    });
    return;
  }
  }
  llvm_unreachable("Invalid transfer kind");
}

/// A definition of a register may mark the end of a range.
void VarLocBasedLDV::transferRegisterDef(
    MachineInstr &MI, OpenRangesSet &OpenRanges, VarLocMap &VarLocIDs,
    TransferMap &Transfers) {

  // Meta Instructions do not affect the debug liveness of any register they
  // define.
  if (MI.isMetaInstruction())
    return;

  MachineFunction *MF = MI.getMF();
  const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
  Register SP = TLI->getStackPointerRegisterToSaveRestore();

  // Find the regs killed by MI, and find regmasks of preserved regs.
  DefinedRegsSet DeadRegs;
  SmallVector<const uint32_t *, 4> RegMasks;
  for (const MachineOperand &MO : MI.operands()) {
    // Determine whether the operand is a register def.
    if (MO.isReg() && MO.isDef() && MO.getReg() &&
        Register::isPhysicalRegister(MO.getReg()) &&
        !(MI.isCall() && MO.getReg() == SP)) {
      // Remove ranges of all aliased registers.
      for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)
        // FIXME: Can we break out of this loop early if no insertion occurs?
        DeadRegs.insert(*RAI);
    } else if (MO.isRegMask()) {
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Erase VarLocs which reside in one of the dead registers. For performance
  // reasons, it's critical to not iterate over the full set of open VarLocs.
  // Iterate over the set of dying/used regs instead.
  if (!RegMasks.empty()) {
    SmallVector<Register, 32> UsedRegs;
    getUsedRegs(OpenRanges.getVarLocs(), UsedRegs);
    for (Register Reg : UsedRegs) {
      // Remove ranges of all clobbered registers. Register masks don't usually
      // list SP as preserved. Assume that call instructions never clobber SP,
      // because some backends (e.g., AArch64) never list SP in the regmask.
      // While the debug info may be off for an instruction or two around
      // callee-cleanup calls, transferring the DEBUG_VALUE across the call is
      // still a better user experience.
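      // (Added clarification: clobbersPhysReg returns true when the regmask
      // does not preserve Reg, i.e. the register is considered clobbered and
      // any open location residing in it must be killed below.)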
      if (Reg == SP)
        continue;
      bool AnyRegMaskKillsReg =
          any_of(RegMasks, [Reg](const uint32_t *RegMask) {
            return MachineOperand::clobbersPhysReg(RegMask, Reg);
          });
      if (AnyRegMaskKillsReg)
        DeadRegs.insert(Reg);
    }
  }

  if (DeadRegs.empty())
    return;

  VarLocsInRange KillSet;
  collectIDsForRegs(KillSet, DeadRegs, OpenRanges.getVarLocs(), VarLocIDs);
  OpenRanges.erase(KillSet, VarLocIDs, LocIndex::kUniversalLocation);

  if (TPC) {
    auto &TM = TPC->getTM<TargetMachine>();
    if (TM.Options.ShouldEmitDebugEntryValues())
      emitEntryValues(MI, OpenRanges, VarLocIDs, Transfers, KillSet);
  }
}

bool VarLocBasedLDV::isSpillInstruction(const MachineInstr &MI,
                                        MachineFunction *MF) {
  // TODO: Handle multiple stores folded into one.
  if (!MI.hasOneMemOperand())
    return false;

  if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII))
    return false; // This is not a spill instruction, since no valid size was
                  // returned from either function.

  return true;
}

bool VarLocBasedLDV::isLocationSpill(const MachineInstr &MI,
                                     MachineFunction *MF, Register &Reg) {
  if (!isSpillInstruction(MI, MF))
    return false;

  auto isKilledReg = [&](const MachineOperand &MO, Register &Reg) {
    if (!MO.isReg() || !MO.isUse()) {
      Reg = 0;
      return false;
    }
    Reg = MO.getReg();
    return MO.isKill();
  };

  for (const MachineOperand &MO : MI.operands()) {
    // In a spill instruction generated by the InlineSpiller the spilled
    // register has its kill flag set.
    if (isKilledReg(MO, Reg))
      return true;
    if (Reg != 0) {
      // Check whether the next instruction kills the spilled register.
      // FIXME: Current solution does not cover search for killed register in
      // bundles and instructions further down the chain.
      auto NextI = std::next(MI.getIterator());
      // Skip if the next instruction is the basic block's end iterator.
      if (MI.getParent()->end() == NextI)
        continue;
      Register RegNext;
      for (const MachineOperand &MONext : NextI->operands()) {
        // Return true if we came across the register from the
        // previous spill instruction that is killed in NextI.
        if (isKilledReg(MONext, RegNext) && RegNext == Reg)
          return true;
      }
    }
  }
  // Return false if we didn't find a spilled register.
  return false;
}

Optional<VarLocBasedLDV::VarLoc::SpillLoc>
VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI,
                                     MachineFunction *MF, Register &Reg) {
  if (!MI.hasOneMemOperand())
    return None;

  // FIXME: Handle folded restore instructions with more than one memory
  // operand.
  if (MI.getRestoreSize(TII)) {
    Reg = MI.getOperand(0).getReg();
    return extractSpillBaseRegAndOffset(MI);
  }
  return None;
}

/// A spilled register may indicate that we have to end the current range of
/// a variable and create a new one for the spill location.
/// A restored register may indicate the reverse situation.
/// We don't want to insert any instructions in process(), so we just create
/// the DBG_VALUE without inserting it and keep track of it in \p Transfers.
/// It will be inserted into the BB when we're done iterating over the
/// instructions.
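///
/// Illustrative sketch (pseudo-MIR, register names and operands hypothetical):
///
/// \verbatim
///   DBG_VALUE $reg0, ..., !"x"   ; "x" currently lives in $reg0
///   <spill of killed $reg0>      ; record pending DBG_VALUE of "x" in the
///                                ; stack slot
///   ...
///   <restore into $reg1>         ; record pending DBG_VALUE of "x" in $reg1
/// \endverbatim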
void VarLocBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI,
                                                OpenRangesSet &OpenRanges,
                                                VarLocMap &VarLocIDs,
                                                TransferMap &Transfers) {
  MachineFunction *MF = MI.getMF();
  TransferKind TKind;
  Register Reg;
  Optional<VarLoc::SpillLoc> Loc;

  LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););

  // First, if there are any DBG_VALUEs pointing at a spill slot that is
  // written to, then close the variable location. The value in memory
  // will have changed.
  VarLocsInRange KillSet;
  if (isSpillInstruction(MI, MF)) {
    Loc = extractSpillBaseRegAndOffset(MI);
    for (uint64_t ID : OpenRanges.getSpillVarLocs()) {
      LocIndex Idx = LocIndex::fromRawInteger(ID);
      const VarLoc &VL = VarLocIDs[Idx];
      assert(VL.containsSpillLocs() && "Broken VarLocSet?");
      if (VL.usesSpillLoc(*Loc)) {
        // This location is overwritten by the current instruction -- terminate
        // the open range, and insert an explicit DBG_VALUE $noreg.
        //
        // Doing this at a later stage would require re-interpreting all
        // DBG_VALUEs and DIExpressions to identify whether they point at
        // memory, and then analysing all memory writes to see if they
        // overwrite that memory, which is expensive.
        //
        // At this stage, we already know which DBG_VALUEs are for spills and
        // where they are located; it's best to handle overwrites now.
        KillSet.insert(ID);
        unsigned SpillLocIdx = VL.getSpillLocIdx(*Loc);
        VarLoc::MachineLoc OldLoc = VL.Locs[SpillLocIdx];
        VarLoc UndefVL = VarLoc::CreateCopyLoc(VL, OldLoc, 0);
        LocIndices UndefLocIDs = VarLocIDs.insert(UndefVL);
        Transfers.push_back({&MI, UndefLocIDs.back()});
      }
    }
    OpenRanges.erase(KillSet, VarLocIDs, LocIndex::kSpillLocation);
  }

  // Try to recognize spill and restore instructions that may create a new
  // variable location.
  if (isLocationSpill(MI, MF, Reg)) {
    TKind = TransferKind::TransferSpill;
    LLVM_DEBUG(dbgs() << "Recognized as spill: "; MI.dump(););
    LLVM_DEBUG(dbgs() << "Register: " << Reg << " " << printReg(Reg, TRI)
                      << "\n");
  } else {
    if (!(Loc = isRestoreInstruction(MI, MF, Reg)))
      return;
    TKind = TransferKind::TransferRestore;
    LLVM_DEBUG(dbgs() << "Recognized as restore: "; MI.dump(););
    LLVM_DEBUG(dbgs() << "Register: " << Reg << " " << printReg(Reg, TRI)
                      << "\n");
  }
  // Check if the register or spill location is the location of a debug value.
  auto TransferCandidates = OpenRanges.getEmptyVarLocRange();
  if (TKind == TransferKind::TransferSpill)
    TransferCandidates = OpenRanges.getRegisterVarLocs(Reg);
  else if (TKind == TransferKind::TransferRestore)
    TransferCandidates = OpenRanges.getSpillVarLocs();
  for (uint64_t ID : TransferCandidates) {
    LocIndex Idx = LocIndex::fromRawInteger(ID);
    const VarLoc &VL = VarLocIDs[Idx];
    unsigned LocIdx;
    if (TKind == TransferKind::TransferSpill) {
      assert(VL.usesReg(Reg) && "Broken VarLocSet?");
      LLVM_DEBUG(dbgs() << "Spilling Register " << printReg(Reg, TRI) << '('
                        << VL.Var.getVariable()->getName() << ")\n");
      LocIdx = VL.getRegIdx(Reg);
    } else {
      assert(TKind == TransferKind::TransferRestore && VL.containsSpillLocs() &&
             "Broken VarLocSet?");
      if (!VL.usesSpillLoc(*Loc))
        // The spill location is not the location of a debug value.
        continue;
      LLVM_DEBUG(dbgs() << "Restoring Register " << printReg(Reg, TRI) << '('
                        << VL.Var.getVariable()->getName() << ")\n");
      LocIdx = VL.getSpillLocIdx(*Loc);
    }
    VarLoc::MachineLoc MLoc = VL.Locs[LocIdx];
    insertTransferDebugPair(MI, OpenRanges, Transfers, VarLocIDs, Idx, TKind,
                            MLoc, Reg);
    // FIXME: A comment should explain why it's correct to return early here,
    // if that is in fact correct.
    return;
  }
}

/// If \p MI is a register copy instruction that copies a previously tracked
/// value from one register to another register that is callee saved, create a
/// new DBG_VALUE instruction describing the value in the copy's destination
/// register.
void VarLocBasedLDV::transferRegisterCopy(MachineInstr &MI,
                                          OpenRangesSet &OpenRanges,
                                          VarLocMap &VarLocIDs,
                                          TransferMap &Transfers) {
  auto DestSrc = TII->isCopyInstr(MI);
  if (!DestSrc)
    return;

  const MachineOperand *DestRegOp = DestSrc->Destination;
  const MachineOperand *SrcRegOp = DestSrc->Source;

  if (!DestRegOp->isDef())
    return;

  auto isCalleeSavedReg = [&](Register Reg) {
    for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
      if (CalleeSavedRegs.test(*RAI))
        return true;
    return false;
  };

  Register SrcReg = SrcRegOp->getReg();
  Register DestReg = DestRegOp->getReg();

  // We only want to recognize copies whose destination register is callee
  // saved. A destination register that can be clobbered by a call is likely
  // to be clobbered soon, whereas the previous register location, being
  // callee saved, is likely to stay unclobbered for longer, even if it is
  // killed.
  if (!isCalleeSavedReg(DestReg))
    return;

  // Remember an entry value movement. If we encounter a new debug value of
  // a parameter describing only a movement of the value around, rather than
  // a modification of it, we are still able to use the entry value if needed.
  if (isRegOtherThanSPAndFP(*DestRegOp, MI, TRI)) {
    for (uint64_t ID : OpenRanges.getEntryValueBackupVarLocs()) {
      LocIndex Idx = LocIndex::fromRawInteger(ID);
      const VarLoc &VL = VarLocIDs[Idx];
      if (VL.isEntryValueBackupReg(SrcReg)) {
        LLVM_DEBUG(dbgs() << "Copy of the entry value: "; MI.dump(););
        VarLoc EntryValLocCopyBackup =
            VarLoc::CreateEntryCopyBackupLoc(VL.MI, LS, VL.Expr, DestReg);
        // Stop tracking the original entry value.
        OpenRanges.erase(VL);

        // Start tracking the entry value copy.
        LocIndices EntryValCopyLocIDs = VarLocIDs.insert(EntryValLocCopyBackup);
        OpenRanges.insert(EntryValCopyLocIDs, EntryValLocCopyBackup);
        break;
      }
    }
  }

  if (!SrcRegOp->isKill())
    return;

  for (uint64_t ID : OpenRanges.getRegisterVarLocs(SrcReg)) {
    LocIndex Idx = LocIndex::fromRawInteger(ID);
    assert(VarLocIDs[Idx].usesReg(SrcReg) && "Broken VarLocSet?");
    VarLoc::MachineLocValue Loc;
    Loc.RegNo = SrcReg;
    VarLoc::MachineLoc MLoc{VarLoc::MachineLocKind::RegisterKind, Loc};
    insertTransferDebugPair(MI, OpenRanges, Transfers, VarLocIDs, Idx,
                            TransferKind::TransferCopy, MLoc, DestReg);
    // FIXME: A comment should explain why it's correct to return early here,
    // if that is in fact correct.
    return;
  }
}

/// Terminate all open ranges at the end of the current basic block.
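/// Whatever is still open at this point becomes the block's live-out set in
/// \p OutLocs; the boolean result tells the caller whether the live-outs
/// changed, so that successor blocks can be re-queued for another pass.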
bool VarLocBasedLDV::transferTerminator(MachineBasicBlock *CurMBB,
                                        OpenRangesSet &OpenRanges,
                                        VarLocInMBB &OutLocs,
                                        const VarLocMap &VarLocIDs) {
  bool Changed = false;
  LLVM_DEBUG({
    VarVec VarLocs;
    OpenRanges.getUniqueVarLocs(VarLocs, VarLocIDs);
    for (VarLoc &VL : VarLocs) {
      // Copy OpenRanges to OutLocs, if not already present.
      dbgs() << "Add to OutLocs in MBB #" << CurMBB->getNumber() << ": ";
      VL.dump(TRI);
    }
  });
  VarLocSet &VLS = getVarLocsInMBB(CurMBB, OutLocs);
  Changed = VLS != OpenRanges.getVarLocs();
  // New OutLocs set may be different due to spill, restore or register
  // copy instruction processing.
  if (Changed)
    VLS = OpenRanges.getVarLocs();
  OpenRanges.clear();
  return Changed;
}

/// Accumulate a mapping between each DILocalVariable fragment and other
/// fragments of that DILocalVariable which overlap. This reduces work during
/// the data-flow stage from "Find any overlapping fragments" to "Check if the
/// known-to-overlap fragments are present".
/// \param MI A previously unprocessed DEBUG_VALUE instruction to analyze for
/// fragment usage.
/// \param SeenFragments Map from DILocalVariable to all fragments of that
/// Variable which are known to exist.
/// \param OverlappingFragments The overlap map being constructed, from one
/// Var/Fragment pair to a vector of fragments known to overlap.
void VarLocBasedLDV::accumulateFragmentMap(MachineInstr &MI,
                                           VarToFragments &SeenFragments,
                                           OverlapMap &OverlappingFragments) {
  DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
  FragmentInfo ThisFragment = MIVar.getFragmentOrDefault();

  // If this is the first sighting of this variable, then we are guaranteed
  // there are currently no overlapping fragments either. Initialize the set
  // of seen fragments, record no overlaps for the current one, and return.
  auto SeenIt = SeenFragments.find(MIVar.getVariable());
  if (SeenIt == SeenFragments.end()) {
    SmallSet<FragmentInfo, 4> OneFragment;
    OneFragment.insert(ThisFragment);
    SeenFragments.insert({MIVar.getVariable(), OneFragment});

    OverlappingFragments.insert({{MIVar.getVariable(), ThisFragment}, {}});
    return;
  }

  // If this particular Variable/Fragment pair already exists in the overlap
  // map, it has already been accounted for.
  auto IsInOLapMap =
      OverlappingFragments.insert({{MIVar.getVariable(), ThisFragment}, {}});
  if (!IsInOLapMap.second)
    return;

  auto &ThisFragmentsOverlaps = IsInOLapMap.first->second;
  auto &AllSeenFragments = SeenIt->second;

  // Otherwise, examine all other seen fragments for this variable, with "this"
  // fragment being a previously unseen fragment. Record any pair of
  // overlapping fragments.
  for (auto &ASeenFragment : AllSeenFragments) {
    // Does this previously seen fragment overlap?
    if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) {
      // Yes: Mark the current fragment as being overlapped.
      ThisFragmentsOverlaps.push_back(ASeenFragment);
      // Mark the previously seen fragment as being overlapped by the current
      // one.
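      // (Added example: for fragments (offset 0, size 32) and (offset 16,
      // size 32) of the same variable, fragmentsOverlap is true, so each
      // fragment ends up listed in the other's overlap vector.)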
      auto ASeenFragmentsOverlaps =
          OverlappingFragments.find({MIVar.getVariable(), ASeenFragment});
      assert(ASeenFragmentsOverlaps != OverlappingFragments.end() &&
             "Previously seen var fragment has no vector of overlaps");
      ASeenFragmentsOverlaps->second.push_back(ThisFragment);
    }
  }

  AllSeenFragments.insert(ThisFragment);
}

/// Apply the transfer functions for \p MI to OpenRanges: end, begin or move
/// variable location ranges as the instruction requires, recording any
/// pending DBG_VALUE transfers in \p Transfers.
void VarLocBasedLDV::process(MachineInstr &MI, OpenRangesSet &OpenRanges,
                             VarLocMap &VarLocIDs, TransferMap &Transfers) {
  transferDebugValue(MI, OpenRanges, VarLocIDs);
  transferRegisterDef(MI, OpenRanges, VarLocIDs, Transfers);
  transferRegisterCopy(MI, OpenRanges, VarLocIDs, Transfers);
  transferSpillOrRestoreInst(MI, OpenRanges, VarLocIDs, Transfers);
}

/// This routine joins the analysis results of all incoming edges in \p MBB by
/// intersecting the predecessors' live-out sets: a location is live-in to
/// \p MBB only if the same source variable resides in the same location at
/// the end of every visited predecessor of \p MBB.
bool VarLocBasedLDV::join(
    MachineBasicBlock &MBB, VarLocInMBB &OutLocs, VarLocInMBB &InLocs,
    const VarLocMap &VarLocIDs,
    SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
    SmallPtrSetImpl<const MachineBasicBlock *> &ArtificialBlocks) {
  LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");

  VarLocSet InLocsT(Alloc); // Temporary incoming locations.

  // For all predecessors of this MBB, find the set of VarLocs that
  // can be joined.
  int NumVisited = 0;
  for (auto p : MBB.predecessors()) {
    // Ignore backedges if we have not visited the predecessor yet. As the
    // predecessor hasn't yet had locations propagated into it, most locations
    // will not yet be valid, so treat them as all being uninitialized and
    // potentially valid. If a location guessed to be correct here is
    // invalidated later, we will remove it when we revisit this block.
    if (!Visited.count(p)) {
      LLVM_DEBUG(dbgs() << "  ignoring unvisited pred MBB: " << p->getNumber()
                        << "\n");
      continue;
    }
    auto OL = OutLocs.find(p);
    // The join is empty if any of the visited predecessors have empty OutLocs.
    if (OL == OutLocs.end())
      return false;

    // Just copy over the Out locs to incoming locs for the first visited
    // predecessor, and for all other predecessors join the Out locs.
    VarLocSet &OutLocVLS = *OL->second.get();
    if (!NumVisited)
      InLocsT = OutLocVLS;
    else
      InLocsT &= OutLocVLS;

    LLVM_DEBUG({
      if (!InLocsT.empty()) {
        VarVec VarLocs;
        collectAllVarLocs(VarLocs, InLocsT, VarLocIDs);
        for (const VarLoc &VL : VarLocs)
          dbgs() << "  gathered candidate incoming var: "
                 << VL.Var.getVariable()->getName() << "\n";
      }
    });

    NumVisited++;
  }

  // Filter out DBG_VALUEs that are out of scope.
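  // (Added clarification: a candidate live-in survives only if the lexical
  // scope of its variable dominates this block; for example, a variable local
  // to an 'if' body should not be propagated into blocks outside that body.
  // Artificial, line-0 blocks are exempt from this check.)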
  VarLocSet KillSet(Alloc);
  bool IsArtificial = ArtificialBlocks.count(&MBB);
  if (!IsArtificial) {
    for (uint64_t ID : InLocsT) {
      LocIndex Idx = LocIndex::fromRawInteger(ID);
      if (!VarLocIDs[Idx].dominates(LS, MBB)) {
        KillSet.set(ID);
        LLVM_DEBUG({
          auto Name = VarLocIDs[Idx].Var.getVariable()->getName();
          dbgs() << "  killing " << Name << ", it doesn't dominate MBB\n";
        });
      }
    }
  }
  InLocsT.intersectWithComplement(KillSet);

  // As we are processing blocks in reverse post-order we
  // should have processed at least one predecessor, unless it
  // is the entry block, which has no predecessors.
  assert((NumVisited || MBB.pred_empty()) &&
         "Should have processed at least one predecessor");

  VarLocSet &ILS = getVarLocsInMBB(&MBB, InLocs);
  bool Changed = false;
  if (ILS != InLocsT) {
    ILS = InLocsT;
    Changed = true;
  }

  return Changed;
}

void VarLocBasedLDV::flushPendingLocs(VarLocInMBB &PendingInLocs,
                                      VarLocMap &VarLocIDs) {
  // PendingInLocs records all locations propagated into blocks that have not
  // yet had DBG_VALUE insts created. Go through and create those insts now.
  for (auto &Iter : PendingInLocs) {
    // Map is keyed on a constant pointer, unwrap it so we can insert insts.
    auto &MBB = const_cast<MachineBasicBlock &>(*Iter.first);
    VarLocSet &Pending = *Iter.second.get();

    SmallVector<VarLoc, 32> VarLocs;
    collectAllVarLocs(VarLocs, Pending, VarLocIDs);

    for (VarLoc DiffIt : VarLocs) {
      // The ID location is live-in to MBB -- work out what kind of machine
      // location it is and create a DBG_VALUE.
      if (DiffIt.isEntryBackupLoc())
        continue;
      MachineInstr *MI = DiffIt.BuildDbgValue(*MBB.getParent());
      MBB.insert(MBB.instr_begin(), MI);

      (void)MI;
      LLVM_DEBUG(dbgs() << "Inserted: "; MI->dump(););
    }
  }
}

bool VarLocBasedLDV::isEntryValueCandidate(
    const MachineInstr &MI, const DefinedRegsSet &DefinedRegs) const {
  assert(MI.isDebugValue() && "This must be DBG_VALUE.");

  // TODO: Add support for local variables that are expressed in terms of
  // parameters' entry values.
  // TODO: Add support for modified arguments that can be expressed
  // by using their entry value.
  auto *DIVar = MI.getDebugVariable();
  if (!DIVar->isParameter())
    return false;

  // Do not consider parameters that belong to an inlined function.
  if (MI.getDebugLoc()->getInlinedAt())
    return false;

  // Only consider parameters that are described using registers. Parameters
  // that are passed on the stack are not yet supported, so ignore debug
  // values that are described by the frame or stack pointer.
  if (!isRegOtherThanSPAndFP(MI.getDebugOperand(0), MI, TRI))
    return false;

  // If a parameter's value has been propagated from the caller, then the
  // parameter's DBG_VALUE may be described using a register defined by some
  // instruction in the entry block, in which case we shouldn't create an
  // entry value.
  if (DefinedRegs.count(MI.getDebugOperand(0).getReg()))
    return false;

  // TODO: Add support for parameters that have a pre-existing debug expression
  // (e.g. fragments).
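  // (Added note: any non-empty DIExpression, e.g. a fragment or an arithmetic
  // expression, means the DBG_VALUE does not describe the plain parameter
  // value in a register, so it is rejected as an entry-value candidate.)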
  if (MI.getDebugExpression()->getNumElements() > 0)
    return false;

  return true;
}

/// Collect all register defines (including aliases) for the given instruction.
static void collectRegDefs(const MachineInstr &MI, DefinedRegsSet &Regs,
                           const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.isDef() && MO.getReg())
      for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
        Regs.insert(*AI);
}

/// This routine records the entry values of function parameters. The values
/// can be used as backup values: if we lose track of an unmodified parameter,
/// its backup value will be used as the primary location.
void VarLocBasedLDV::recordEntryValue(const MachineInstr &MI,
                                      const DefinedRegsSet &DefinedRegs,
                                      OpenRangesSet &OpenRanges,
                                      VarLocMap &VarLocIDs) {
  if (TPC) {
    auto &TM = TPC->getTM<TargetMachine>();
    if (!TM.Options.ShouldEmitDebugEntryValues())
      return;
  }

  DebugVariable V(MI.getDebugVariable(), MI.getDebugExpression(),
                  MI.getDebugLoc()->getInlinedAt());

  if (!isEntryValueCandidate(MI, DefinedRegs) ||
      OpenRanges.getEntryValueBackup(V))
    return;

  LLVM_DEBUG(dbgs() << "Creating the backup entry location: "; MI.dump(););

  // Create the entry value and use it as a backup location. It remains valid
  // until the parameter is modified.
  DIExpression *NewExpr =
      DIExpression::prepend(MI.getDebugExpression(), DIExpression::EntryValue);
  VarLoc EntryValLocAsBackup = VarLoc::CreateEntryBackupLoc(MI, LS, NewExpr);
  LocIndices EntryValLocIDs = VarLocIDs.insert(EntryValLocAsBackup);
  OpenRanges.insert(EntryValLocIDs, EntryValLocAsBackup);
}

/// Calculate the liveness information for the given machine function and
/// extend ranges across basic blocks.
bool VarLocBasedLDV::ExtendRanges(MachineFunction &MF, TargetPassConfig *TPC,
                                  unsigned InputBBLimit,
                                  unsigned InputDbgValLimit) {
  LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n");

  if (!MF.getFunction().getSubprogram())
    // VarLocBasedLDV will already have removed all DBG_VALUEs.
    return false;

  // Skip functions from NoDebug compilation units.
  if (MF.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
      DICompileUnit::NoDebug)
    return false;

  TRI = MF.getSubtarget().getRegisterInfo();
  TII = MF.getSubtarget().getInstrInfo();
  TFI = MF.getSubtarget().getFrameLowering();
  TFI->getCalleeSaves(MF, CalleeSavedRegs);
  this->TPC = TPC;
  LS.initialize(MF);

  bool Changed = false;
  bool OLChanged = false;
  bool MBBJoined = false;

  VarLocMap VarLocIDs;         // Map VarLoc<>unique ID for use in bitvectors.
  OverlapMap OverlapFragments; // Map of overlapping variable fragments.
  OpenRangesSet OpenRanges(Alloc, OverlapFragments);
                               // Ranges that are open until end of bb.
  VarLocInMBB OutLocs;         // Ranges that exist beyond bb.
  VarLocInMBB InLocs;          // Ranges that are incoming after joining.
  TransferMap Transfers;       // DBG_VALUEs associated with transfers (such as
                               // spills, copies and restores).

  VarToFragments SeenFragments;

  // Blocks which are artificial, i.e. blocks which exclusively contain
  // instructions without locations, or with line 0 locations.
  SmallPtrSet<const MachineBasicBlock *, 16> ArtificialBlocks;

  DenseMap<unsigned int, MachineBasicBlock *> OrderToBB;
  DenseMap<MachineBasicBlock *, unsigned int> BBToOrder;
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Worklist;
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Pending;

  // Set of register defines that are seen when traversing the entry block
  // looking for debug entry value candidates.
  DefinedRegsSet DefinedRegs;

  // Only in the case of the entry MBB, collect DBG_VALUEs representing
  // function parameters in order to generate debug entry values for them.
  MachineBasicBlock &First_MBB = *(MF.begin());
  for (auto &MI : First_MBB) {
    collectRegDefs(MI, DefinedRegs, TRI);
    if (MI.isDebugValue())
      recordEntryValue(MI, DefinedRegs, OpenRanges, VarLocIDs);
  }

  // Initialize per-block structures and scan for fragment overlaps.
  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.isDebugValue())
        accumulateFragmentMap(MI, SeenFragments, OverlapFragments);

  auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool {
    if (const DebugLoc &DL = MI.getDebugLoc())
      return DL.getLine() != 0;
    return false;
  };
  for (auto &MBB : MF)
    if (none_of(MBB.instrs(), hasNonArtificialLocation))
      ArtificialBlocks.insert(&MBB);

  LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs,
                              "OutLocs after initialization", dbgs()));

  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  unsigned int RPONumber = 0;
  for (MachineBasicBlock *MBB : RPOT) {
    OrderToBB[RPONumber] = MBB;
    BBToOrder[MBB] = RPONumber;
    Worklist.push(RPONumber);
    ++RPONumber;
  }

  if (RPONumber > InputBBLimit) {
    unsigned NumInputDbgValues = 0;
    for (auto &MBB : MF)
      for (auto &MI : MBB)
        if (MI.isDebugValue())
          ++NumInputDbgValues;
    if (NumInputDbgValues > InputDbgValLimit) {
      LLVM_DEBUG(dbgs() << "Disabling VarLocBasedLDV: " << MF.getName()
                        << " has " << RPONumber << " basic blocks and "
                        << NumInputDbgValues
                        << " input DBG_VALUEs, exceeding limits.\n");
      return false;
    }
  }

  // This is a standard "intersection of predecessor outs" dataflow problem.
  // To solve it, we perform join() and process() using the two worklist method
  // until the ranges converge.
  // Ranges have converged when both worklists are empty.
  SmallPtrSet<const MachineBasicBlock *, 16> Visited;
  while (!Worklist.empty() || !Pending.empty()) {
    // We track what is on the pending worklist to avoid inserting the same
    // thing twice. We could avoid this with a custom priority queue, but this
    // is probably not worth it.
    SmallPtrSet<MachineBasicBlock *, 16> OnPending;
    LLVM_DEBUG(dbgs() << "Processing Worklist\n");
    while (!Worklist.empty()) {
      MachineBasicBlock *MBB = OrderToBB[Worklist.top()];
      Worklist.pop();
      MBBJoined = join(*MBB, OutLocs, InLocs, VarLocIDs, Visited,
                       ArtificialBlocks);
      MBBJoined |= Visited.insert(MBB).second;
      if (MBBJoined) {
        MBBJoined = false;
        Changed = true;
        // Now that we have started to extend ranges across BBs we need to
        // examine spill, copy and restore instructions to see whether they
        // operate on registers that correspond to user variables.
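        // (Added note: OpenRanges was cleared by transferTerminator at the
        // end of the previously processed block, so seeding it from this
        // block's InLocs re-establishes the live-in locations before the
        // transfer functions run over the instructions below.)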
        // First load any pending inlocs.
        OpenRanges.insertFromLocSet(getVarLocsInMBB(MBB, InLocs), VarLocIDs);
        for (auto &MI : *MBB)
          process(MI, OpenRanges, VarLocIDs, Transfers);
        OLChanged |= transferTerminator(MBB, OpenRanges, OutLocs, VarLocIDs);

        LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs,
                                    "OutLocs after propagating", dbgs()));
        LLVM_DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs,
                                    "InLocs after propagating", dbgs()));

        if (OLChanged) {
          OLChanged = false;
          for (auto s : MBB->successors())
            if (OnPending.insert(s).second) {
              Pending.push(BBToOrder[s]);
            }
        }
      }
    }
    Worklist.swap(Pending);
    // At this point, Pending must be empty, since it was just swapped with
    // the (now empty) Worklist.
    assert(Pending.empty() && "Pending should be empty");
  }

  // Add any DBG_VALUE instructions created by location transfers.
  for (auto &TR : Transfers) {
    assert(!TR.TransferInst->isTerminator() &&
           "Cannot insert DBG_VALUE after terminator");
    MachineBasicBlock *MBB = TR.TransferInst->getParent();
    const VarLoc &VL = VarLocIDs[TR.LocationID];
    MachineInstr *MI = VL.BuildDbgValue(MF);
    MBB->insertAfterBundle(TR.TransferInst->getIterator(), MI);
  }
  Transfers.clear();

  // Deferred inlocs will not have had any DBG_VALUE insts created; do
  // that now.
  flushPendingLocs(InLocs, VarLocIDs);

  LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, "Final OutLocs", dbgs()));
  LLVM_DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs, "Final InLocs", dbgs()));
  return Changed;
}

LDVImpl *llvm::makeVarLocBasedLiveDebugValues() {
  return new VarLocBasedLDV();
}