//===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/Log.h"

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StackFrameList constructor
//----------------------------------------------------------------------
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in
  // case another thread is currently using this stack frame list.
  Clear();
}

void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf(
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

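  // At this point we know frame 0 is an inlined frame. What remains is to
  // decide which level of the chain of inlined calls at this PC should be
  // presented as the stop location, and to record that as the current
  // inlined depth.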
  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise, we
  // stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I go to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    LLVM_FALLTHROUGH;
  default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them.
    // So first we check whether we have more than one inlined block sharing
    // this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("ResetCurrentInlinedDepth: setting inlined "
                  "depth: %d 0x%" PRIx64 ".\n",
                  m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind *unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  if (!unwinder)
    return;

  uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }
  m_frames.resize(num_frames);
}

void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // If we've already gotten more frames than were asked for, or we've already
  // finished unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind *unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success =
              unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp.reset(new StackFrame(m_thread.shared_from_this(),
                                               m_frames.size(), idx, reg_ctx_sp,
                                               cfa, pc, nullptr));
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      const bool stop_id_is_valid = false;
      const bool is_history_frame = false;
      unwind_frame_sp.reset(new StackFrame(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, 0, stop_id_is_valid, is_history_frame, nullptr));
      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    if (unwind_block) {
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to look up the symbol context above. If we are in the first
      // concrete frame, then we look up using the current address; otherwise
      // we decrement the address by one to get the correct location.
      if (idx > 0) {
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section,
          // then after the adjustment it will point into another section. In
          // that case resolve the address again to the correct section plus
          // offset form.
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
        } else {
          curr_frame_address.Slide(-1);
        }
      }

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        StackFrameSP frame_sp(
            new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                           unwind_frame_sp->GetRegisterContextSP(), cfa,
                           next_frame_address, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge until you've calculated all the frames in this stack.
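  // Merging walks both lists from the back, i.e. from the oldest (outermost)
  // frames toward the newest. As long as the StackIDs match, the previous
  // list's StackFrame objects are reused in m_frames, so StackFrameSPs handed
  // out at earlier stops keep pointing at the same underlying objects.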
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth == UINT32_MAX)
    return m_frames.size();
  else
    return m_frames.size() - inlined_depth;
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for,
  // if there are that many.
  // If there weren't, then you asked for too many frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      Unwind *unwinder = m_thread.GetUnwinder();
      if (unwinder) {
        addr_t pc, cfa;
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          const bool stop_id_is_valid = false;
          const bool is_history_frame = false;
          frame_sp.reset(new StackFrame(
              m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc, 0,
              stop_id_is_valid, is_history_frame, nullptr));

          Function *function =
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
          if (function) {
            // When we aren't showing inline functions we always use the
            // topmost function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
          } else {
            // Set the symbol scope from the symbol, regardless of whether it
            // is nullptr or valid.
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
          }
          SetFrameAtIndex(idx, frame_sp);
        }
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought once that was taken into account, then reset the current
    // inlined depth and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames? That should never
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always less than or equal to the frame index, so it is a
  // good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache.
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }

      // if (m_frames.back()->GetStackID() < stack_id)
      //   frame_idx = m_frames.size();
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again.
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

// Mark a stack frame as the current frame using the frame index.
bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, so reset the number of stack frames to zero; we
// can then determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

void StackFrameList::InvalidateFrames(uint32_t start_idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_show_inlined_frames) {
    Clear();
  } else {
    const size_t num_frames = m_frames.size();
    while (start_idx < num_frames) {
      m_frames[start_idx].reset();
      ++start_idx;
    }
  }
}

void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_ap,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
  if (curr_ap)
    current_lock = std::unique_lock<std::recursive_mutex>(curr_ap->m_mutex);
  if (prev_sp)
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
  if (prev_sp)
    prev_sp->Dump(&s);
  else
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
  if (curr_ap)
    curr_ap->Dump(&s);
  else
    s.PutCString("NULL");
  s.EOL();
#endif

  if (!curr_ap || curr_ap->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");
#endif
    curr_ap.release();
    return;
  }

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
#endif
    // We either don't have any previous frames, or since we have more than
    // one current frame it means we have all the frames and can safely
    // replace our previous frames.
    prev_sp.reset(curr_ap.release());
    return;
  }

  const uint32_t num_curr_frames = curr_ap->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString(
        "We have more than one current frame, so use current frames...\n");
#endif
    // We have more than one current frame, which means we have all the frames
    // and can safely replace our previous frames.
    prev_sp.reset(curr_ap.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");
    prev_sp->Dump(&s);
#endif
    return;
  }

  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_ap->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
#endif

  // We have only a single current frame.
  // Our previous stack frames only had a single frame as well...
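  // From here there are three possibilities:
  //   * the stack IDs match: frame 0 is the same frame as before, so pull the
  //     cached data from the previous frame into the current one;
  //   * the current stack ID is less than the previous one: the current frame
  //     is a new, younger frame, so insert it at the front of the previous
  //     list;
  //   * otherwise: leave the previous frames as they are.
  // In all three cases prev_sp remains the merged list and curr_ap is
  // released at the end.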
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
             "cached data\n");
#endif

    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    // prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    // (*curr_frame_zero_sp);
    // prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
#endif
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
  }

  curr_ap.release();

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
  prev_sp->Dump(&s);
#endif
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}