//===-- StackFrameList.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

// StackFrameList constructor
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in case
  // another thread is currently using this stack frame list
  Clear();
}

void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        LLDB_LOGF(
            log,
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      LLDB_LOGF(
          log,
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise,
  // we stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I go to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    LLVM_FALLTHROUGH;
  default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them. So first we check whether we have
    // more than one inlined block sharing this PC:
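    // For example, suppose concrete function A inlines B, and B in turn
    // inlines C, and the PC is the shared start address of B's and C's
    // ranges. The loop below then counts one inlined ancestor (B), the depth
    // becomes 2, and the frame reported as #0 ends up being A; the user can
    // then step "into" B and C one inlined frame at a time.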
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      LLDB_LOGF(log,
                "ResetCurrentInlinedDepth: setting inlined "
                "depth: %d 0x%" PRIx64 ".\n",
                m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind &unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// A sequence of calls that comprise some portion of a backtrace. Each frame
/// is represented as a pair of a callee (Function *) and an address within the
/// callee.
using CallSequence = std::vector<std::pair<Function *, addr_t>>;

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  ExecutionContext &exe_ctx, Target &target,
                                  addr_t return_pc, CallSequence &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  if (log)
    for (const auto &edge : begin.GetCallEdges())
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge->GetReturnPCAddress(begin, target));
  CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
  if (!first_edge) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge->GetCallee(images, exe_ctx);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    CallSequence active_path = {};
    CallSequence solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;
    Target &target;
    ExecutionContext &context;

    DFS(Function *end, ModuleList &images, Target &target,
        ExecutionContext &context)
        : end(end), images(images), target(target), context(context) {}

    void search(CallEdge &first_edge, Function &first_callee,
                CallSequence &path) {
      dfs(first_edge, first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(CallEdge &current_edge, Function &callee) {
      // Found a path to the target function.
      if (&callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(&callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.emplace_back(&callee, LLDB_INVALID_ADDRESS);
      for (const auto &edge : callee.GetTailCallingEdges()) {
        Function *next_callee = edge->GetCallee(images, context);
        if (!next_callee)
          continue;

        addr_t tail_call_pc = edge->GetCallInstPC(callee, target);
        active_path.back().second = tail_call_pc;

        dfs(*edge, *next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
/// --------------
/// |    ...     | <- Completed frames.
/// --------------
/// | prev_frame |
/// --------------
/// |    ...     | <- Artificial frames inserted here.
/// --------------
/// | next_frame |
/// --------------
/// |    ...     | <- Not-yet-visited frames.
/// --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  // Cannot synthesize tail call frames when the stack is empty (there is no
  // "previous" frame).
  if (m_frames.empty())
    return;

  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  CallSequence path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
  exe_ctx.SetFramePtr(&next_frame);
  FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
                        path, images, log);

  // Push synthetic tail call frames.
  for (auto calleeInfo : llvm::reverse(path)) {
    Function *callee = calleeInfo.first;
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc = calleeInfo.second;
    // We do not want to subtract 1 from this PC, as it's the actual address
    // of the tail-calling branch instruction. This address is provided by the
    // compiler via DW_AT_call_pc.
    constexpr bool behaves_like_zeroth_frame = true;
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial,
        behaves_like_zeroth_frame, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // We've already gotten more frames than asked for, or we've already finished
  // unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind &unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

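  // For example, with a current inlined depth of 2, a request for visible
  // frame 3 has to populate m_frames up to internal index 5, since the two
  // deepest entries of m_frames are currently hidden from the user.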
  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    bool behaves_like_zeroth_frame = (idx == 0);
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success = unwinder.GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, behaves_like_zeroth_frame, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    if (unwind_block) {
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to look up the symbol context above. If we are in the first
      // concrete frame, then we look up using the current address, else we
      // decrement the address by one to get the correct location.
      if (idx > 0) {
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section
          // then after adjustment it will point to another section. In that
          // case resolve the address again to the correct section plus
          // offset form.
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
        } else {
          curr_frame_address.Slide(-1);
        }
      }

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        behaves_like_zeroth_frame = false;
        StackFrameSP frame_sp(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx,
            unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
            behaves_like_zeroth_frame, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  return GetVisibleStackFrameIndex(m_frames.size());
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't then you asked for too many frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      addr_t pc, cfa;
      bool behaves_like_zeroth_frame = (idx == 0);
      if (m_thread.GetUnwinder().GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame)) {
        const bool cfa_is_valid = true;
        frame_sp = std::make_shared<StackFrame>(
            m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
            StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

        Function *function =
            frame_sp->GetSymbolContext(eSymbolContextFunction).function;
        if (function) {
          // When we aren't showing inline functions we always use the
          // topmost function block as the scope.
          frame_sp->SetSymbolContextScope(&function->GetBlock(false));
        } else {
          // Set the symbol scope from the symbol regardless of whether it is
          // nullptr or valid.
          frame_sp->SetSymbolContextScope(
              frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
        }
        SetFrameAtIndex(idx, frame_sp);
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames? That should never
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // frame index is always greater than or equal to the unwind index, so the
  // unwind index is a good place to start. If we have inlined frames we might
  // have 5 concrete frames (frame unwind indexes go from 0-4), but we might
  // have 15 frames after we make all the inlined frames. Most of the time the
  // unwind frame index (or the concrete frame index) is the same as the frame
  // index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache.
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again.
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_up,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
  if (curr_up)
    current_lock = std::unique_lock<std::recursive_mutex>(curr_up->m_mutex);
  if (prev_sp)
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
  if (prev_sp)
    prev_sp->Dump(&s);
  else
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
  if (curr_up)
    curr_up->Dump(&s);
  else
    s.PutCString("NULL");
  s.EOL();
#endif

  if (!curr_up || curr_up->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");
#endif
    curr_up.release();
    return;
  }

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
#endif
    // We either don't have any previous frames, or we have more than one
    // current frame, which means we have all the frames and can safely
    // replace our previous frames.
    prev_sp.reset(curr_up.release());
    return;
  }

  const uint32_t num_curr_frames = curr_up->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString(
        "We have more than one current frame, so use current frames...\n");
#endif
    // We have more than one current frame, which means we have all the frames
    // and can safely replace our previous frames.
    prev_sp.reset(curr_up.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");
    prev_sp->Dump(&s);
#endif
    return;
  }

  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_up->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
#endif

  // We have only a single current frame.
  // Our previous stack frames only had a single frame as well...
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
             "cached data\n");
#endif

    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    // prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    // (*curr_frame_zero_sp);
    // prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
#endif
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
  }

  curr_up.release();

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
  prev_sp->Dump(&s);
#endif
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}