//===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StackFrameList constructor
//----------------------------------------------------------------------
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  // Carry the virtual inlined-stepping state over from the previous frame
  // list so stepping through inlined frames stays consistent across stops.
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in
  // case another thread is currently using this stack frame list
  Clear();
}

// Recompute the current inlined depth, but only if it has been invalidated
// (i.e. GetCurrentInlinedDepth() reports UINT32_MAX).
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

// Return the cached inlined depth, invalidating it first if the thread's PC
// has moved since the depth was recorded. UINT32_MAX means "no valid depth".
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      // The PC moved, so the recorded depth no longer describes this stop.
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

// Recompute m_current_inlined_depth/m_current_inlined_pc from scratch based
// on frame zero and the current stop reason.
void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    // Frame zero is concrete, so there is no inlined stack to virtualize.
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf(
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  // Only act when the PC sits exactly at the start of the inlined block's
  // range; anywhere else we leave the depth invalidated.
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest most block. Otherwise,
  // we stopped here naturally as the result of a step, so set ourselves in
  // the containing frame of the whole set of nested inlines, so the user can
  // then "virtually" step into the frames one by one, or next over the whole
  // mess. Note: We don't have to handle being somewhere in the middle of the
  // stack here, since ResetCurrentInlinedDepth doesn't get called if there is
  // a valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this break point is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I do to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      // A user breakpoint was hit: stop in the deepest (user-visible) frame.
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    // All-internal breakpoint sites fall through to the "natural stop"
    // handling below.
    LLVM_FALLTHROUGH;
  default: {
    // Otherwise, we should set ourselves at the container of the inlining,
    // so that the user can descend into them. So first we check whether we
    // have more than one inlined block sharing this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("ResetCurrentInlinedDepth: setting inlined "
                  "depth: %d 0x%" PRIx64 ".\n",
                  m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

// Step one level down the virtual inlined stack. Returns true if the depth
// was actually decremented.
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

// Force the inlined depth to new_depth; UINT32_MAX invalidates it, any other
// value pins it to the current PC.
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

// Unwind concrete frames only (no inlined expansion). Frame objects are not
// created here; m_frames is just resized so GetFrameAtIndex can fill lazily.
void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind *unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  if (!unwinder)
    return;

  uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
  // NOTE(review): when end_idx == UINT32_MAX, end_idx + 1 wraps to 0, so the
  // "done unwinding" marker is only set if num_frames == 0 — confirm whether
  // a full unwind via GetNumFrames() is meant to set
  // m_concrete_frames_fetched here.
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  Target &target, addr_t return_pc,
                                  std::vector<Function *> &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC. The edges are
  // searched with lower_bound, which assumes GetCallEdges() returns them
  // sorted by return PC address.
  auto first_level_edges = begin.GetCallEdges();
  if (log)
    for (const CallEdge &edge : first_level_edges)
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge.GetReturnPCAddress(begin, target));
  auto first_edge_it = std::lower_bound(
      first_level_edges.begin(), first_level_edges.end(), return_pc,
      [&](const CallEdge &edge, addr_t target_pc) {
        return edge.GetReturnPCAddress(begin, target) < target_pc;
      });
  if (first_edge_it == first_level_edges.end() ||
      first_edge_it->GetReturnPCAddress(begin, target) != return_pc) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }
  CallEdge &first_edge = const_cast<CallEdge &>(*first_edge_it);

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge.GetCallee(images);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    // Functions on the DFS stack right now (the candidate path).
    std::vector<Function *> active_path = {};
    // First complete path found from first_callee to `end`.
    std::vector<Function *> solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    // Set when more than one path (or a cycle) is found; the result is then
    // discarded because the execution cannot be reconstructed uniquely.
    bool ambiguous = false;
    Function *end;
    ModuleList &images;

    DFS(Function *end, ModuleList &images) : end(end), images(images) {}

    void search(Function *first_callee, std::vector<Function *> &path) {
      dfs(first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(Function *callee) {
      // Found a path to the target function.
      if (callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(callee);
      for (CallEdge &edge : callee->GetTailCallingEdges()) {
        Function *next_callee = edge.GetCallee(images);
        if (!next_callee)
          continue;

        dfs(next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images).search(first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
///   --------------
///   |    ...     | <- Completed frames.
///   --------------
///   | prev_frame |
///   --------------
///   |    ...     | <- Artificial frames inserted here.
///   --------------
///   | next_frame |
///   --------------
///   |    ...     | <- Not-yet-visited frames.
///   --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  assert(!m_frames.empty() && "Cannot synthesize frames in an empty stack");
  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  std::vector<Function *> path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  FindInterveningFrames(*next_func, *prev_func, target, return_pc, path,
                        images, log);

  // Push synthetic tail call frames. These frames have no CFA (the frame was
  // elided by the tail call, so there is nothing to unwind to) and their PC
  // is the callee's entry point.
  for (Function *callee : llvm::reverse(path)) {
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc =
        callee->GetAddressRange().GetBaseAddress().GetLoadAddress(&target);
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0}", callee->GetDisplayName());
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

// Populate m_frames with at least end_idx + 1 frames (concrete frames plus
// their expanded inlined frames and synthesized tail-call frames), then merge
// cached data from the previous frame list once all frames are fetched.
void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // We've already gotten more frames than asked for, or we've already
  // finished unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind *unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success =
              unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, lets make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp.reset(new StackFrame(m_thread.shared_from_this(),
                                               m_frames.size(), idx, reg_ctx_sp,
                                               cfa, pc, nullptr));
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        // Frame zero already exists; reuse it and its CFA.
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp.reset(
          new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx, cfa,
                         cfa_is_valid, pc, StackFrame::Kind::Regular, nullptr));

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    if (unwind_block) {
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to lookup the symbol context above. If we are in the first
      // concrete frame, then we lookup using the current address, else we
      // decrement the address by one to get the correct location.
      if (idx > 0) {
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section
          // then after adjustment it will point to an other section. In that
          // case resolve the address again to the correct section plus
          // offset form.
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
        } else {
          curr_frame_address.Slide(-1);
        }
      }

      SymbolContext next_frame_sc;
      Address next_frame_address;

      // Walk up the chain of inlined scopes containing this PC, creating one
      // frame per inlined function.
      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        StackFrameSP frame_sp(
            new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                           unwind_frame_sp->GetRegisterContextSP(), cfa,
                           next_frame_address, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    // Match frames from the bottom of both stacks upward; identical StackIDs
    // mean the frame survived the run, so keep the previous frame object to
    // preserve its cached data (and pointer identity for clients).
    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

// Return the number of user-visible frames, optionally forcing a full unwind.
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  return GetVisibleStackFrameIndex(m_frames.size());
}

// Dump every cached frame (pointer, StackID, formatted description) to s.
void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

// Return the frame at the user-visible index idx (the current inlined depth
// is added to map it to the internal index), creating frames on demand.
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for,
  // if there are that many. If there weren't then you asked for too many
  // frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      // Concrete-only mode: m_frames was only resized, so build the frame
      // lazily from the unwinder now.
      Unwind *unwinder = m_thread.GetUnwinder();
      if (unwinder) {
        addr_t pc, cfa;
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          frame_sp.reset(new StackFrame(m_thread.shared_from_this(), idx, idx,
                                        cfa, cfa_is_valid, pc,
                                        StackFrame::Kind::Regular, nullptr));

          Function *function =
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
          if (function) {
            // When we aren't showing inline functions we always use the top
            // most function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
          } else {
            // Set the symbol scope from the symbol regardless if it is
            // nullptr or valid.
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
          }
          SetFrameAtIndex(idx, frame_sp);
        }
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong
    // with the CurrentInlinedDepth such that there weren't as many frames as
    // we thought taking that into account, then reset the current inlined
    // depth and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames, that should not ever
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always greater than or equal to the frame index, so it
  // is a good place to start. If we have inlined frames we might have 5
  // concrete frames (frame unwind indexes go from 0-4), but we might have 15
  // frames after we make all the inlined frames. Most of the time the unwind
  // frame index (or the concrete frame index) is the same as the frame
  // index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

// Ordering predicate for std::lower_bound over the cached frame list.
static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

// Find the frame whose StackID equals stack_id: first a binary search of the
// cache, then a linear walk that faults in deeper frames as needed.
StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

// Store frame_sp at slot idx, growing the list if needed. Returns false only
// if the resize failed to produce the slot.
bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

// Select the given frame (matched by pointer) and return its user-visible
// index; defaults to 0 if the frame is not in the list.
uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      // Convert the internal index to a user-visible one.
      // NOTE(review): if the internal index is smaller than inlined_depth
      // this unsigned subtraction wraps — presumably callers only pass frames
      // at or below the current inlined depth; confirm.
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

// Select by user-visible index. Returns true if such a frame exists.
bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

// If this thread is the process's selected thread, push the selected frame's
// file and line into the target's source manager.
void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number stack frames to zero so we can
// determine how many frames we have lazily.
815 void StackFrameList::Clear() { 816 std::lock_guard<std::recursive_mutex> guard(m_mutex); 817 m_frames.clear(); 818 m_concrete_frames_fetched = 0; 819 } 820 821 void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_ap, 822 lldb::StackFrameListSP &prev_sp) { 823 std::unique_lock<std::recursive_mutex> current_lock, previous_lock; 824 if (curr_ap) 825 current_lock = std::unique_lock<std::recursive_mutex>(curr_ap->m_mutex); 826 if (prev_sp) 827 previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex); 828 829 #if defined(DEBUG_STACK_FRAMES) 830 StreamFile s(stdout, false); 831 s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n"); 832 if (prev_sp) 833 prev_sp->Dump(&s); 834 else 835 s.PutCString("NULL"); 836 s.PutCString("\nCurr:\n"); 837 if (curr_ap) 838 curr_ap->Dump(&s); 839 else 840 s.PutCString("NULL"); 841 s.EOL(); 842 #endif 843 844 if (!curr_ap || curr_ap->GetNumFrames(false) == 0) { 845 #if defined(DEBUG_STACK_FRAMES) 846 s.PutCString("No current frames, leave previous frames alone...\n"); 847 #endif 848 curr_ap.release(); 849 return; 850 } 851 852 if (!prev_sp || prev_sp->GetNumFrames(false) == 0) { 853 #if defined(DEBUG_STACK_FRAMES) 854 s.PutCString("No previous frames, so use current frames...\n"); 855 #endif 856 // We either don't have any previous frames, or since we have more than one 857 // current frames it means we have all the frames and can safely replace 858 // our previous frames. 859 prev_sp.reset(curr_ap.release()); 860 return; 861 } 862 863 const uint32_t num_curr_frames = curr_ap->GetNumFrames(false); 864 865 if (num_curr_frames > 1) { 866 #if defined(DEBUG_STACK_FRAMES) 867 s.PutCString( 868 "We have more than one current frame, so use current frames...\n"); 869 #endif 870 // We have more than one current frames it means we have all the frames and 871 // can safely replace our previous frames. 
872 prev_sp.reset(curr_ap.release()); 873 874 #if defined(DEBUG_STACK_FRAMES) 875 s.PutCString("\nMerged:\n"); 876 prev_sp->Dump(&s); 877 #endif 878 return; 879 } 880 881 StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0)); 882 StackFrameSP curr_frame_zero_sp(curr_ap->GetFrameAtIndex(0)); 883 StackID curr_stack_id(curr_frame_zero_sp->GetStackID()); 884 StackID prev_stack_id(prev_frame_zero_sp->GetStackID()); 885 886 #if defined(DEBUG_STACK_FRAMES) 887 const uint32_t num_prev_frames = prev_sp->GetNumFrames(false); 888 s.Printf("\n%u previous frames with one current frame\n", num_prev_frames); 889 #endif 890 891 // We have only a single current frame 892 // Our previous stack frames only had a single frame as well... 893 if (curr_stack_id == prev_stack_id) { 894 #if defined(DEBUG_STACK_FRAMES) 895 s.Printf("\nPrevious frame #0 is same as current frame #0, merge the " 896 "cached data\n"); 897 #endif 898 899 curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame( 900 *prev_frame_zero_sp); 901 // prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame 902 // (*curr_frame_zero_sp); 903 // prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp); 904 } else if (curr_stack_id < prev_stack_id) { 905 #if defined(DEBUG_STACK_FRAMES) 906 s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous " 907 "frame #0, insert current frame zero in front of previous\n"); 908 #endif 909 prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp); 910 } 911 912 curr_ap.release(); 913 914 #if defined(DEBUG_STACK_FRAMES) 915 s.PutCString("\nMerged:\n"); 916 prev_sp->Dump(&s); 917 #endif 918 } 919 920 lldb::StackFrameSP 921 StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) { 922 const_iterator pos; 923 const_iterator begin = m_frames.begin(); 924 const_iterator end = m_frames.end(); 925 lldb::StackFrameSP ret_sp; 926 927 for (pos = begin; pos != end; ++pos) { 928 if (pos->get() == stack_frame_ptr) { 929 ret_sp = (*pos); 930 break; 
931 } 932 } 933 return ret_sp; 934 } 935 936 size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame, 937 uint32_t num_frames, bool show_frame_info, 938 uint32_t num_frames_with_source, 939 bool show_unique, 940 const char *selected_frame_marker) { 941 size_t num_frames_displayed = 0; 942 943 if (num_frames == 0) 944 return 0; 945 946 StackFrameSP frame_sp; 947 uint32_t frame_idx = 0; 948 uint32_t last_frame; 949 950 // Don't let the last frame wrap around... 951 if (num_frames == UINT32_MAX) 952 last_frame = UINT32_MAX; 953 else 954 last_frame = first_frame + num_frames; 955 956 StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame(); 957 const char *unselected_marker = nullptr; 958 std::string buffer; 959 if (selected_frame_marker) { 960 size_t len = strlen(selected_frame_marker); 961 buffer.insert(buffer.begin(), len, ' '); 962 unselected_marker = buffer.c_str(); 963 } 964 const char *marker = nullptr; 965 966 for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) { 967 frame_sp = GetFrameAtIndex(frame_idx); 968 if (!frame_sp) 969 break; 970 971 if (selected_frame_marker != nullptr) { 972 if (frame_sp == selected_frame_sp) 973 marker = selected_frame_marker; 974 else 975 marker = unselected_marker; 976 } 977 978 if (!frame_sp->GetStatus(strm, show_frame_info, 979 num_frames_with_source > (first_frame - frame_idx), 980 show_unique, marker)) 981 break; 982 ++num_frames_displayed; 983 } 984 985 strm.IndentLess(); 986 return num_frames_displayed; 987 } 988